path: root/drivers/target
author		Andy Grover <agrover@redhat.com>	2011-06-08 13:36:43 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2011-07-22 05:37:48 -0400
commit		6708bb27bb2703da238f21f516034263348af5be (patch)
tree		a23e1f9eab22933d773d6b6ad6263d6751379a00 /drivers/target
parent		ec98f7825c6eaa4a9afb0eb518826efc8a2ed4a2 (diff)
target: Follow up core updates from AGrover and HCH (round 4)
This patch contains the squashed version of the fourth round of series cleanups
from Andy and Christoph, following the heavy lifting in the preceding
'Eliminate usage of struct se_mem' and 'Make all control CDBs scatter-gather'
changes. It also includes a conversion of target core and the v3.0 mainline
fabric modules (loopback and tcm_fc) to use pr_debug and the
CONFIG_DYNAMIC_DEBUG infrastructure! These have been squashed into this final
round for v3.1.

target: Remove ifdeffed code in t_g_process_write
target: Remove direct ramdisk code
target: Rename task_sg_num to task_sg_nents
target: Remove custom debug macros for pr_debug. Use pr_err().
target: Remove custom debug macros in mainline fabrics
target: Set WSNZ=1 in block limits VPD. Abort if WRITE_SAME sectors = 0
target: Remove transport do_se_mem_map callback
target: Further simplify transport_free_pages
target: Redo task allocation return value handling
target: Remove extra parentheses
target: change alloc_task call to take *cdb, not *cmd

(nab: Fix bogus struct file assignments in fd_do_readv and fd_do_writev)

Signed-off-by: Andy Grover <agrover@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
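For reference, the pr_* helpers adopted throughout this series are thin wrappers over printk() from include/linux/printk.h. A paraphrased sketch of the mapping (not the exact kernel definitions, which also prepend pr_fmt(fmt) to the format string):

/* Paraphrased sketch of the include/linux/printk.h helpers this
 * series converts to.
 */
#define pr_err(fmt, ...)	printk(KERN_ERR fmt, ##__VA_ARGS__)
#define pr_warn(fmt, ...)	printk(KERN_WARNING fmt, ##__VA_ARGS__)

/*
 * pr_debug() is the interesting one: with CONFIG_DYNAMIC_DEBUG each
 * call site becomes a dynamic_pr_debug() descriptor that can be
 * switched on and off at runtime; otherwise it compiles away entirely
 * unless DEBUG is defined.
 */
#ifdef CONFIG_DYNAMIC_DEBUG
#define pr_debug(fmt, ...)	dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...)	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...)	no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif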
Diffstat (limited to 'drivers/target')
 drivers/target/loopback/Kconfig              |   6
 drivers/target/loopback/tcm_loop.c           |  94
 drivers/target/loopback/tcm_loop.h           |   6
 drivers/target/target_core_alua.c            | 168
 drivers/target/target_core_cdb.c             |  50
 drivers/target/target_core_configfs.c        | 464
 drivers/target/target_core_device.c          | 214
 drivers/target/target_core_fabric_configfs.c | 110
 drivers/target/target_core_fabric_lib.c      |  10
 drivers/target/target_core_file.c            | 100
 drivers/target/target_core_file.h            |   2
 drivers/target/target_core_hba.c             |  12
 drivers/target/target_core_iblock.c          | 116
 drivers/target/target_core_iblock.h          |   1
 drivers/target/target_core_pr.c              | 454
 drivers/target/target_core_pscsi.c           | 183
 drivers/target/target_core_pscsi.h           |   3
 drivers/target/target_core_rd.c              | 453
 drivers/target/target_core_rd.h              |   2
 drivers/target/target_core_tmr.c             |  59
 drivers/target/target_core_tpg.c             |  75
 drivers/target/target_core_transport.c       | 690
 drivers/target/target_core_ua.c              |  30
 drivers/target/tcm_fc/tcm_fc.h               |  24
 drivers/target/tcm_fc/tfc_cmd.c              |  37
 drivers/target/tcm_fc/tfc_conf.c             |  33
 drivers/target/tcm_fc/tfc_io.c               |   8
 drivers/target/tcm_fc/tfc_sess.c             |  18
 28 files changed, 1385 insertions(+), 2037 deletions(-)
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index 57dcbc2d711b..abe8ecbcdf06 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -3,9 +3,3 @@ config LOOPBACK_TARGET
 	help
 	  Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
 	  fabric loopback module.
-
-config LOOPBACK_TARGET_CDB_DEBUG
-	bool "TCM loopback fabric module CDB debug code"
-	depends on LOOPBACK_TARGET
-	help
-	  Say Y here to enable the TCM loopback fabric module CDB debug code
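The build-time knob above can be dropped because the pr_debug() call sites that replace TL_CDB_DEBUG() are selectable at runtime instead: with CONFIG_DYNAMIC_DEBUG enabled, writing a query such as `file tcm_loop.c +p` to `<debugfs>/dynamic_debug/control` enables every pr_debug() in the loopback module without a rebuild (see Documentation/dynamic-debug-howto.txt).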
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 99603bc45786..aa2d67997235 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -79,7 +79,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 		set_host_byte(sc, DID_ERROR);
 		return NULL;
 	}
@@ -281,7 +281,7 @@ static int tcm_loop_queuecommand(
 	struct tcm_loop_hba *tl_hba;
 	struct tcm_loop_tpg *tl_tpg;
 
-	TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
 		" scsi_buf_len: %u\n", sc->device->host->host_no,
 		sc->device->id, sc->device->channel, sc->device->lun,
 		sc->cmnd[0], scsi_bufflen(sc));
@@ -331,7 +331,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	 */
 	tl_nexus = tl_hba->tl_nexus;
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to perform device reset without"
+		pr_err("Unable to perform device reset without"
 			" active I_T Nexus\n");
 		return FAILED;
 	}
@@ -344,13 +344,13 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+		pr_err("Unable to allocate memory for tl_cmd\n");
 		return FAILED;
 	}
 
 	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 	if (!tl_tmr) {
-		printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+		pr_err("Unable to allocate memory for tl_tmr\n");
 		goto release;
 	}
 	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
@@ -435,7 +435,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 	sh = scsi_host_alloc(&tcm_loop_driver_template,
 			sizeof(struct tcm_loop_hba));
 	if (!sh) {
-		printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+		pr_err("Unable to allocate struct scsi_host\n");
 		return -ENODEV;
 	}
 	tl_hba->sh = sh;
@@ -454,7 +454,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 
 	error = scsi_add_host(sh, &tl_hba->dev);
 	if (error) {
-		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+		pr_err("%s: scsi_add_host failed\n", __func__);
 		scsi_host_put(sh);
 		return -ENODEV;
 	}
@@ -495,7 +495,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
 
 	ret = device_register(&tl_hba->dev);
 	if (ret) {
-		printk(KERN_ERR "device_register() failed for"
+		pr_err("device_register() failed for"
 			" tl_hba->dev: %d\n", ret);
 		return -ENODEV;
 	}
@@ -513,24 +513,24 @@ static int tcm_loop_alloc_core_bus(void)
 
 	tcm_loop_primary = root_device_register("tcm_loop_0");
 	if (IS_ERR(tcm_loop_primary)) {
-		printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+		pr_err("Unable to allocate tcm_loop_primary\n");
 		return PTR_ERR(tcm_loop_primary);
 	}
 
 	ret = bus_register(&tcm_loop_lld_bus);
 	if (ret) {
-		printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 		goto dev_unreg;
 	}
 
 	ret = driver_register(&tcm_loop_driverfs);
 	if (ret) {
-		printk(KERN_ERR "driver_register() failed for"
+		pr_err("driver_register() failed for"
 			"tcm_loop_driverfs\n");
 		goto bus_unreg;
 	}
 
-	printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+	pr_debug("Initialized TCM Loop Core Bus\n");
 	return ret;
 
 bus_unreg:
@@ -546,7 +546,7 @@ static void tcm_loop_release_core_bus(void)
 	bus_unregister(&tcm_loop_lld_bus);
 	root_device_unregister(tcm_loop_primary);
 
-	printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+	pr_debug("Releasing TCM Loop Core BUS\n");
 }
 
 static char *tcm_loop_get_fabric_name(void)
@@ -574,7 +574,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 	case SCSI_PROTOCOL_ISCSI:
 		return iscsi_get_fabric_proto_ident(se_tpg);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -630,7 +630,7 @@ static u32 tcm_loop_get_pr_transport_id(
 		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
 					format_code, buf);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -660,7 +660,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
 		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
 					format_code);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -694,7 +694,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
 		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
 					port_nexus_ptr);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -743,7 +743,7 @@ static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
 
 	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
 	if (!tl_nacl) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+		pr_err("Unable to allocate struct tcm_loop_nacl\n");
 		return NULL;
 	}
 
@@ -853,7 +853,7 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 			struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 		" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	sc->result = SAM_STAT_GOOD;
@@ -868,7 +868,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 			struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 		" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	if (se_cmd->sense_buffer &&
@@ -943,7 +943,7 @@ static int tcm_loop_port_link(
 	 */
 	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 	return 0;
 }
 
@@ -961,7 +961,7 @@ static void tcm_loop_port_unlink(
 	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 				se_lun->unpacked_lun);
 	if (!sd) {
-		printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+		pr_err("Unable to locate struct scsi_device for %d:%d:"
 			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 		return;
 	}
@@ -974,7 +974,7 @@ static void tcm_loop_port_unlink(
 	atomic_dec(&tl_tpg->tl_tpg_port_count);
 	smp_mb__after_atomic_dec();
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
 
 /* End items for tcm_loop_port_cit */
@@ -991,14 +991,14 @@ static int tcm_loop_make_nexus(
 	int ret = -ENOMEM;
 
 	if (tl_tpg->tl_hba->tl_nexus) {
-		printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
 		return -EEXIST;
 	}
 	se_tpg = &tl_tpg->tl_se_tpg;
 
 	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 		return -ENOMEM;
 	}
 	/*
@@ -1027,7 +1027,7 @@ static int tcm_loop_make_nexus(
 	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
 			tl_nexus->se_sess, tl_nexus);
 	tl_tpg->tl_hba->tl_nexus = tl_nexus;
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		name);
 	return 0;
@@ -1053,13 +1053,13 @@ static int tcm_loop_drop_nexus(
 		return -ENODEV;
 
 	if (atomic_read(&tpg->tl_tpg_port_count)) {
-		printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+		pr_err("Unable to remove TCM_Loop I_T Nexus with"
 			" active TPG port count: %d\n",
 			atomic_read(&tpg->tl_tpg_port_count));
 		return -EPERM;
 	}
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		tl_nexus->se_sess->se_node_acl->initiatorname);
 	/*
@@ -1115,7 +1115,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	 * tcm_loop_make_nexus()
 	 */
 	if (strlen(page) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+		pr_err("Emulated NAA Sas Address: %s, exceeds"
 			" max: %d\n", page, TL_WWN_ADDR_LEN);
 		return -EINVAL;
 	}
@@ -1124,7 +1124,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "naa.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
-			printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+			pr_err("Passed SAS Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1135,7 +1135,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "fc.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
-			printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+			pr_err("Passed FCP Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1146,7 +1146,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "iqn.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
-			printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+			pr_err("Passed iSCSI Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1154,7 +1154,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 		port_ptr = &i_port[0];
 		goto check_newline;
 	}
-	printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+	pr_err("Unable to locate prefix for emulated Initiator Port:"
 		" %s\n", i_port);
 	return -EINVAL;
 	/*
@@ -1194,7 +1194,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 
 	tpgt_str = strstr(name, "tpgt_");
 	if (!tpgt_str) {
-		printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+		pr_err("Unable to locate \"tpgt_#\" directory"
 			" group\n");
 		return ERR_PTR(-EINVAL);
 	}
@@ -1202,7 +1202,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
 
 	if (tpgt >= TL_TPGS_PER_HBA) {
-		printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
 			" %u\n", tpgt, TL_TPGS_PER_HBA);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1218,7 +1218,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 	if (ret < 0)
 		return ERR_PTR(-ENOMEM);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 
@@ -1245,7 +1245,7 @@ void tcm_loop_drop_naa_tpg(
 	 */
 	core_tpg_deregister(se_tpg);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 }
@@ -1266,7 +1266,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
 
 	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
 	if (!tl_hba) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+		pr_err("Unable to allocate struct tcm_loop_hba\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	/*
@@ -1286,7 +1286,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
 	}
 	ptr = strstr(name, "iqn.");
 	if (!ptr) {
-		printk(KERN_ERR "Unable to locate prefix for emulated Target "
+		pr_err("Unable to locate prefix for emulated Target "
 			"Port: %s\n", name);
 		ret = -EINVAL;
 		goto out;
@@ -1295,7 +1295,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
 
 check_len:
 	if (strlen(name) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+		pr_err("Emulated NAA %s Address: %s, exceeds"
 			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
 			TL_WWN_ADDR_LEN);
 		ret = -EINVAL;
@@ -1314,7 +1314,7 @@ check_len:
 
 	sh = tl_hba->sh;
 	tcm_loop_hba_no_cnt++;
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
 		" %s Address: %s at Linux/SCSI Host ID: %d\n",
 		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
 
@@ -1337,7 +1337,7 @@ void tcm_loop_drop_scsi_hba(
 	 */
 	device_unregister(&tl_hba->dev);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
 		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
 		config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
@@ -1373,7 +1373,7 @@ static int tcm_loop_register_configfs(void)
 	 */
 	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
 	if (IS_ERR(fabric)) {
-		printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
+		pr_err("tcm_loop_register_configfs() failed!\n");
 		return PTR_ERR(fabric);
 	}
 	/*
@@ -1464,7 +1464,7 @@ static int tcm_loop_register_configfs(void)
 	 */
 	ret = target_fabric_configfs_register(fabric);
 	if (ret < 0) {
-		printk(KERN_ERR "target_fabric_configfs_register() for"
+		pr_err("target_fabric_configfs_register() for"
 			" TCM_Loop failed!\n");
 		target_fabric_configfs_free(fabric);
 		return -1;
@@ -1473,7 +1473,7 @@ static int tcm_loop_register_configfs(void)
 	 * Setup our local pointer to *fabric.
 	 */
 	tcm_loop_fabric_configfs = fabric;
-	printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+	pr_debug("TCM_LOOP[0] - Set fabric ->"
 		" tcm_loop_fabric_configfs\n");
 	return 0;
 }
@@ -1485,7 +1485,7 @@ static void tcm_loop_deregister_configfs(void)
 
 	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
 	tcm_loop_fabric_configfs = NULL;
-	printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+	pr_debug("TCM_LOOP[0] - Cleared"
 		" tcm_loop_fabric_configfs\n");
 }
 
@@ -1498,7 +1498,7 @@ static int __init tcm_loop_fabric_init(void)
 			__alignof__(struct tcm_loop_cmd),
 			0, NULL);
 	if (!tcm_loop_cmd_cache) {
-		printk(KERN_ERR "kmem_cache_create() for"
+		pr_debug("kmem_cache_create() for"
 			" tcm_loop_cmd_cache failed\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7e9f7ab45548..6b76c7a22bb0 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,12 +16,6 @@
  */
 #define TL_SCSI_MAX_CMD_LEN		32
 
-#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
-# define TL_CDB_DEBUG(x...)		printk(KERN_INFO x)
-#else
-# define TL_CDB_DEBUG(x...)
-#endif
-
 struct tcm_loop_cmd {
 	/* State of Linux/SCSI CDB+Data descriptor */
 	u32 sc_cmd_state;
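The semantic shift at the converted call sites is worth noting: TL_CDB_DEBUG() compiled to an unconditional KERN_INFO printk whenever CONFIG_LOOPBACK_TARGET_CDB_DEBUG was set, whereas pr_debug() is compiled out by default and enabled per call site at runtime under CONFIG_DYNAMIC_DEBUG. A before/after sketch of one call site from the tcm_loop.c diff above:

/* Before: KERN_INFO printk, present only when the driver was built
 * with CONFIG_LOOPBACK_TARGET_CDB_DEBUG=y.
 */
TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
	" cdb: 0x%02x\n", sc, sc->cmnd[0]);

/* After: always built; emitted only when this site is enabled through
 * the dynamic debug control file (or when DEBUG is defined).
 */
pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
	" cdb: 0x%02x\n", sc, sc->cmnd[0]);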
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index dba412ff3718..98c98a3a0250 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -167,7 +167,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 	int alua_access_state, primary = 0, rc;
 	u16 tg_pt_id, rtpi;
 
-	if (!(l_port))
+	if (!l_port)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 
 	buf = transport_kmap_first_data_page(cmd);
@@ -177,24 +177,24 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 	 * for the local tg_pt_gp.
 	 */
 	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
-	if (!(l_tg_pt_gp_mem)) {
-		printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+	if (!l_tg_pt_gp_mem) {
+		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
 		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		goto out;
 	}
 	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
-	if (!(l_tg_pt_gp)) {
+	if (!l_tg_pt_gp) {
 		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
 		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		goto out;
 	}
 	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
 	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
-	if (!(rc)) {
-		printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+	if (!rc) {
+		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 			" while TPGS_EXPLICT_ALUA is disabled\n");
 		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		goto out;
@@ -249,7 +249,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 			list_for_each_entry(tg_pt_gp,
 					&su_dev->t10_alua.tg_pt_gps_list,
 					tg_pt_gp_list) {
-				if (!(tg_pt_gp->tg_pt_gp_valid_id))
+				if (!tg_pt_gp->tg_pt_gp_valid_id)
 					continue;
 
 				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
@@ -498,7 +498,7 @@ static int core_alua_state_check(
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	int out_alua_state, nonop_delay_msecs;
 
-	if (!(port))
+	if (!port)
 		return 0;
 	/*
 	 * First, check for a struct se_port specific secondary ALUA target port
@@ -506,7 +506,7 @@ static int core_alua_state_check(
 	 */
 	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
 		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-		printk(KERN_INFO "ALUA: Got secondary offline status for local"
+		pr_debug("ALUA: Got secondary offline status for local"
 			" target port\n");
 		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
 		return 1;
@@ -548,7 +548,7 @@ static int core_alua_state_check(
 	 */
 	case ALUA_ACCESS_STATE_OFFLINE:
 	default:
-		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+		pr_err("Unknown ALUA access state: 0x%02x\n",
 			out_alua_state);
 		return -EINVAL;
 	}
@@ -580,7 +580,7 @@ static int core_alua_check_transition(int state, int *primary)
 		*primary = 0;
 		break;
 	default:
-		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
+		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 		return -EINVAL;
 	}
 
@@ -638,7 +638,7 @@ int core_alua_check_nonop_delay(
 	 * The ALUA Active/NonOptimized access state delay can be disabled
 	 * in via configfs with a value of zero
 	 */
-	if (!(cmd->alua_nonop_delay))
+	if (!cmd->alua_nonop_delay)
 		return 0;
 	/*
 	 * struct se_cmd->alua_nonop_delay gets set by a target port group
@@ -667,7 +667,7 @@ static int core_alua_write_tpg_metadata(
 
 	file = filp_open(path, flags, 0600);
 	if (IS_ERR(file) || !file || !file->f_dentry) {
-		printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+		pr_err("filp_open(%s) for ALUA metadata failed\n",
 			path);
 		return -ENODEV;
 	}
@@ -681,7 +681,7 @@ static int core_alua_write_tpg_metadata(
 	set_fs(old_fs);
 
 	if (ret < 0) {
-		printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+		pr_err("Error writing ALUA metadata file: %s\n", path);
 		filp_close(file, NULL);
 		return -EIO;
 	}
@@ -778,7 +778,7 @@ static int core_alua_do_transition_tg_pt(
 			 * se_deve->se_lun_acl pointer may be NULL for a
 			 * entry created without explict Node+MappedLUN ACLs
 			 */
-			if (!(lacl))
+			if (!lacl)
 				continue;
 
 			if (explict &&
@@ -820,7 +820,7 @@ static int core_alua_do_transition_tg_pt(
 	 */
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
 
-	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" from primary access state %s to %s\n", (explict) ? "explict" :
 		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
 		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
@@ -851,8 +851,8 @@ int core_alua_do_port_transition(
 		return -EINVAL;
 
 	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
-	if (!(md_buf)) {
-		printk("Unable to allocate buf for ALUA metadata\n");
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
 		return -ENOMEM;
 	}
 
@@ -867,7 +867,7 @@ int core_alua_do_port_transition(
 	 * we only do transition on the passed *l_tp_pt_gp, and not
 	 * on all of the matching target port groups IDs in default_lu_gp.
 	 */
-	if (!(lu_gp->lu_gp_id)) {
+	if (!lu_gp->lu_gp_id) {
 		/*
 		 * core_alua_do_transition_tg_pt() will always return
 		 * success.
@@ -899,7 +899,7 @@ int core_alua_do_port_transition(
 				&su_dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {
 
-			if (!(tg_pt_gp->tg_pt_gp_valid_id))
+			if (!tg_pt_gp->tg_pt_gp_valid_id)
 				continue;
 			/*
 			 * If the target behavior port asymmetric access state
@@ -941,7 +941,7 @@ int core_alua_do_port_transition(
 	}
 	spin_unlock(&lu_gp->lu_gp_lock);
 
-	printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
 		" Group IDs: %hu %s transition to primary state: %s\n",
 		config_item_name(&lu_gp->lu_gp_group.cg_item),
 		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
@@ -1001,9 +1001,9 @@ static int core_alua_set_tg_pt_secondary_state(
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if (!(tg_pt_gp)) {
+	if (!tg_pt_gp) {
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		printk(KERN_ERR "Unable to complete secondary state"
+		pr_err("Unable to complete secondary state"
 			" transition\n");
 		return -EINVAL;
 	}
@@ -1022,7 +1022,7 @@ static int core_alua_set_tg_pt_secondary_state(
 		ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
 		ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
 
-	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" to secondary access state: %s\n", (explict) ? "explict" :
 		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
 		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
@@ -1040,8 +1040,8 @@ static int core_alua_set_tg_pt_secondary_state(
 	 */
 	if (port->sep_tg_pt_secondary_write_md) {
 		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
-		if (!(md_buf)) {
-			printk(KERN_ERR "Unable to allocate md_buf for"
+		if (!md_buf) {
+			pr_err("Unable to allocate md_buf for"
 				" secondary ALUA access metadata\n");
 			return -ENOMEM;
 		}
@@ -1062,8 +1062,8 @@ core_alua_allocate_lu_gp(const char *name, int def_group)
 	struct t10_alua_lu_gp *lu_gp;
 
 	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
-	if (!(lu_gp)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+	if (!lu_gp) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
@@ -1088,14 +1088,14 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
 	 * The lu_gp->lu_gp_id may only be set once..
 	 */
 	if (lu_gp->lu_gp_valid_id) {
-		printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+		pr_warn("ALUA LU Group already has a valid ID,"
 			" ignoring request\n");
 		return -EINVAL;
 	}
 
 	spin_lock(&lu_gps_lock);
 	if (alua_lu_gps_count == 0x0000ffff) {
-		printk(KERN_ERR "Maximum ALUA alua_lu_gps_count:"
+		pr_err("Maximum ALUA alua_lu_gps_count:"
 			" 0x0000ffff reached\n");
 		spin_unlock(&lu_gps_lock);
 		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
@@ -1107,10 +1107,10 @@ again:
 
 	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
 		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
-			if (!(lu_gp_id))
+			if (!lu_gp_id)
 				goto again;
 
-			printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+			pr_warn("ALUA Logical Unit Group ID: %hu"
 				" already exists, ignoring request\n",
 				lu_gp_id);
 			spin_unlock(&lu_gps_lock);
@@ -1133,8 +1133,8 @@ core_alua_allocate_lu_gp_mem(struct se_device *dev)
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 
 	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
-	if (!(lu_gp_mem)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+	if (!lu_gp_mem) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
@@ -1218,7 +1218,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
 		return;
 
 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
-	if (!(lu_gp_mem))
+	if (!lu_gp_mem)
 		return;
 
 	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
@@ -1226,7 +1226,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
 
 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = lu_gp_mem->lu_gp;
-	if ((lu_gp)) {
+	if (lu_gp) {
 		spin_lock(&lu_gp->lu_gp_lock);
 		if (lu_gp_mem->lu_gp_assoc) {
 			list_del(&lu_gp_mem->lu_gp_mem_list);
@@ -1248,10 +1248,10 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
 
 	spin_lock(&lu_gps_lock);
 	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
-		if (!(lu_gp->lu_gp_valid_id))
+		if (!lu_gp->lu_gp_valid_id)
 			continue;
 		ci = &lu_gp->lu_gp_group.cg_item;
-		if (!(strcmp(config_item_name(ci), name))) {
+		if (!strcmp(config_item_name(ci), name)) {
 			atomic_inc(&lu_gp->lu_gp_ref_cnt);
 			spin_unlock(&lu_gps_lock);
 			return lu_gp;
@@ -1307,8 +1307,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 
 	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
-	if (!(tg_pt_gp)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+	if (!tg_pt_gp) {
+		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
 		return NULL;
 	}
 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
@@ -1356,14 +1356,14 @@ int core_alua_set_tg_pt_gp_id(
 	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
 	 */
 	if (tg_pt_gp->tg_pt_gp_valid_id) {
-		printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+		pr_warn("ALUA TG PT Group already has a valid ID,"
 			" ignoring request\n");
 		return -EINVAL;
 	}
 
 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
-		printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
 			" 0x0000ffff reached\n");
 		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
@@ -1376,10 +1376,10 @@ again:
 	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
-			if (!(tg_pt_gp_id))
+			if (!tg_pt_gp_id)
 				goto again;
 
-			printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+			pr_err("ALUA Target Port Group ID: %hu already"
 				" exists, ignoring request\n", tg_pt_gp_id);
 			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			return -EINVAL;
@@ -1403,8 +1403,8 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 
 	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
 				GFP_KERNEL);
-	if (!(tg_pt_gp_mem)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+	if (!tg_pt_gp_mem) {
+		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1491,7 +1491,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 		return;
 
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem))
+	if (!tg_pt_gp_mem)
 		return;
 
 	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
@@ -1499,7 +1499,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if ((tg_pt_gp)) {
+	if (tg_pt_gp) {
 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
 			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1524,10 +1524,10 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
-		if (!(tg_pt_gp->tg_pt_gp_valid_id))
+		if (!tg_pt_gp->tg_pt_gp_valid_id)
 			continue;
 		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
-		if (!(strcmp(config_item_name(ci), name))) {
+		if (!strcmp(config_item_name(ci), name)) {
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			return tg_pt_gp;
@@ -1592,12 +1592,12 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
 		return len;
 
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem))
+	if (!tg_pt_gp_mem)
 		return len;
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if ((tg_pt_gp)) {
+	if (tg_pt_gp) {
 		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
 		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
 			" %hu\nTG Port Primary Access State: %s\nTG Port "
@@ -1634,7 +1634,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	lun = port->sep_lun;
 
 	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
-		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
+		pr_warn("SPC3_ALUA_EMULATED not enabled for"
 			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
 			tpg->se_tpg_tfo->tpg_get_tag(tpg),
 			config_item_name(&lun->lun_group.cg_item));
@@ -1642,7 +1642,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	}
 
 	if (count > TG_PT_GROUP_NAME_BUF) {
-		printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+		pr_err("ALUA Target Port Group alias too large!\n");
 		return -EINVAL;
 	}
 	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
@@ -1659,26 +1659,26 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	 */
 		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
 					strstrip(buf));
-		if (!(tg_pt_gp_new))
+		if (!tg_pt_gp_new)
 			return -ENODEV;
 	}
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem)) {
+	if (!tg_pt_gp_mem) {
 		if (tg_pt_gp_new)
 			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
-		printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+		pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
 		return -EINVAL;
 	}
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if ((tg_pt_gp)) {
+	if (tg_pt_gp) {
 		/*
 		 * Clearing an existing tg_pt_gp association, and replacing
 		 * with the default_tg_pt_gp.
 		 */
-		if (!(tg_pt_gp_new)) {
-			printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+		if (!tg_pt_gp_new) {
+			pr_debug("Target_Core_ConfigFS: Moving"
 				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
 				" alua/%s, ID: %hu back to"
 				" default_tg_pt_gp\n",
@@ -1707,7 +1707,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	 */
 	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
 		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
 		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg),
@@ -1744,11 +1744,11 @@ ssize_t core_alua_store_access_type(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_access_type\n");
+		pr_err("Unable to extract alua_access_type\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
-		printk(KERN_ERR "Illegal value for alua_access_type:"
+		pr_err("Illegal value for alua_access_type:"
 			" %lu\n", tmp);
 		return -EINVAL;
 	}
@@ -1782,11 +1782,11 @@ ssize_t core_alua_store_nonop_delay_msecs(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+		pr_err("Unable to extract nonop_delay_msecs\n");
 		return -EINVAL;
 	}
 	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
-		printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
 			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
 			ALUA_MAX_NONOP_DELAY_MSECS);
 		return -EINVAL;
@@ -1813,11 +1813,11 @@ ssize_t core_alua_store_trans_delay_msecs(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+		pr_err("Unable to extract trans_delay_msecs\n");
 		return -EINVAL;
 	}
 	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
-		printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+		pr_err("Passed trans_delay_msecs: %lu, exceeds"
 			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
 			ALUA_MAX_TRANS_DELAY_MSECS);
 		return -EINVAL;
@@ -1844,11 +1844,11 @@ ssize_t core_alua_store_preferred_bit(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+		pr_err("Unable to extract preferred ALUA value\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
-		printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
 		return -EINVAL;
 	}
 	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
@@ -1858,7 +1858,7 @@ ssize_t core_alua_store_preferred_bit(
 
 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
 {
-	if (!(lun->lun_sep))
+	if (!lun->lun_sep)
 		return -ENODEV;
 
 	return sprintf(page, "%d\n",
@@ -1874,22 +1874,22 @@ ssize_t core_alua_store_offline_bit(
1874 unsigned long tmp; 1874 unsigned long tmp;
1875 int ret; 1875 int ret;
1876 1876
1877 if (!(lun->lun_sep)) 1877 if (!lun->lun_sep)
1878 return -ENODEV; 1878 return -ENODEV;
1879 1879
1880 ret = strict_strtoul(page, 0, &tmp); 1880 ret = strict_strtoul(page, 0, &tmp);
1881 if (ret < 0) { 1881 if (ret < 0) {
1882 printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n"); 1882 pr_err("Unable to extract alua_tg_pt_offline value\n");
1883 return -EINVAL; 1883 return -EINVAL;
1884 } 1884 }
1885 if ((tmp != 0) && (tmp != 1)) { 1885 if ((tmp != 0) && (tmp != 1)) {
1886 printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n", 1886 pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
1887 tmp); 1887 tmp);
1888 return -EINVAL; 1888 return -EINVAL;
1889 } 1889 }
1890 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; 1890 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
1891 if (!(tg_pt_gp_mem)) { 1891 if (!tg_pt_gp_mem) {
1892 printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n"); 1892 pr_err("Unable to locate *tg_pt_gp_mem\n");
1893 return -EINVAL; 1893 return -EINVAL;
1894 } 1894 }
1895 1895
@@ -1918,13 +1918,13 @@ ssize_t core_alua_store_secondary_status(
1918 1918
1919 ret = strict_strtoul(page, 0, &tmp); 1919 ret = strict_strtoul(page, 0, &tmp);
1920 if (ret < 0) { 1920 if (ret < 0) {
1921 printk(KERN_ERR "Unable to extract alua_tg_pt_status\n"); 1921 pr_err("Unable to extract alua_tg_pt_status\n");
1922 return -EINVAL; 1922 return -EINVAL;
1923 } 1923 }
1924 if ((tmp != ALUA_STATUS_NONE) && 1924 if ((tmp != ALUA_STATUS_NONE) &&
1925 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 1925 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
1926 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 1926 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
1927 printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n", 1927 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
1928 tmp); 1928 tmp);
1929 return -EINVAL; 1929 return -EINVAL;
1930 } 1930 }
@@ -1951,11 +1951,11 @@ ssize_t core_alua_store_secondary_write_metadata(
1951 1951
1952 ret = strict_strtoul(page, 0, &tmp); 1952 ret = strict_strtoul(page, 0, &tmp);
1953 if (ret < 0) { 1953 if (ret < 0) {
1954 printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n"); 1954 pr_err("Unable to extract alua_tg_pt_write_md\n");
1955 return -EINVAL; 1955 return -EINVAL;
1956 } 1956 }
1957 if ((tmp != 0) && (tmp != 1)) { 1957 if ((tmp != 0) && (tmp != 1)) {
1958 printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:" 1958 pr_err("Illegal value for alua_tg_pt_write_md:"
1959 " %lu\n", tmp); 1959 " %lu\n", tmp);
1960 return -EINVAL; 1960 return -EINVAL;
1961 } 1961 }
@@ -1979,7 +1979,7 @@ int core_setup_alua(struct se_device *dev, int force_pt)
1979 !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) { 1979 !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
1980 alua->alua_type = SPC_ALUA_PASSTHROUGH; 1980 alua->alua_type = SPC_ALUA_PASSTHROUGH;
1981 alua->alua_state_check = &core_alua_state_check_nop; 1981 alua->alua_state_check = &core_alua_state_check_nop;
1982 printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" 1982 pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
1983 " emulation\n", dev->transport->name); 1983 " emulation\n", dev->transport->name);
1984 return 0; 1984 return 0;
1985 } 1985 }
@@ -1988,7 +1988,7 @@ int core_setup_alua(struct se_device *dev, int force_pt)
1988 * use emulated ALUA. 1988 * use emulated ALUA.
1989 */ 1989 */
1990 if (dev->transport->get_device_rev(dev) >= SCSI_3) { 1990 if (dev->transport->get_device_rev(dev) >= SCSI_3) {
1991 printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3" 1991 pr_debug("%s: Enabling ALUA Emulation for SPC-3"
1992 " device\n", dev->transport->name); 1992 " device\n", dev->transport->name);
1993 /* 1993 /*
1994 * Associate this struct se_device with the default ALUA 1994 * Associate this struct se_device with the default ALUA
@@ -2005,13 +2005,13 @@ int core_setup_alua(struct se_device *dev, int force_pt)
2005 default_lu_gp); 2005 default_lu_gp);
2006 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2006 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2007 2007
2008 printk(KERN_INFO "%s: Adding to default ALUA LU Group:" 2008 pr_debug("%s: Adding to default ALUA LU Group:"
2009 " core/alua/lu_gps/default_lu_gp\n", 2009 " core/alua/lu_gps/default_lu_gp\n",
2010 dev->transport->name); 2010 dev->transport->name);
2011 } else { 2011 } else {
2012 alua->alua_type = SPC2_ALUA_DISABLED; 2012 alua->alua_type = SPC2_ALUA_DISABLED;
2013 alua->alua_state_check = &core_alua_state_check_nop; 2013 alua->alua_state_check = &core_alua_state_check_nop;
2014 printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2" 2014 pr_debug("%s: Disabling ALUA Emulation for SPC-2"
2015 " device\n", dev->transport->name); 2015 " device\n", dev->transport->name);
2016 } 2016 }
2017 2017
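
The pattern above repeats through the whole series: printk(KERN_ERR ...) becomes pr_err(...), and the chatty KERN_INFO setup messages are demoted to pr_debug() so they either compile out or route through CONFIG_DYNAMIC_DEBUG. A minimal sketch of the two helpers as <linux/printk.h> provides them — the pr_fmt prefix and the function below are illustrative, not taken from the commit:

	/* pr_fmt(), if defined before the first include, is prepended to
	 * every pr_*() format string in the file. */
	#define pr_fmt(fmt) "example: " fmt

	#include <linux/printk.h>

	static void report_bad_value(unsigned long tmp, int max)
	{
		/* unconditional, logged at KERN_ERR */
		pr_err("Passed value: %lu exceeds max: %d\n", tmp, max);

		/* a no-op unless DEBUG is defined, or enabled at runtime via
		 * /sys/kernel/debug/dynamic_debug/control when
		 * CONFIG_DYNAMIC_DEBUG=y */
		pr_debug("setup path reached, value: %lu\n", tmp);
	}
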
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 418282d926fa..982830023661 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -73,7 +73,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
73 * payload going back for EVPD=0 73 * payload going back for EVPD=0
74 */ 74 */
75 if (cmd->data_length < 6) { 75 if (cmd->data_length < 6) {
76 printk(KERN_ERR "SCSI Inquiry payload length: %u" 76 pr_err("SCSI Inquiry payload length: %u"
77 " too small for EVPD=0\n", cmd->data_length); 77 " too small for EVPD=0\n", cmd->data_length);
78 return -EINVAL; 78 return -EINVAL;
79 } 79 }
@@ -327,7 +327,7 @@ check_tpgi:
327 327
328 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 328 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
329 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 329 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
330 if (!(tg_pt_gp)) { 330 if (!tg_pt_gp) {
331 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 331 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
332 goto check_lu_gp; 332 goto check_lu_gp;
333 } 333 }
@@ -358,12 +358,12 @@ check_lu_gp:
358 goto check_scsi_name; 358 goto check_scsi_name;
359 } 359 }
360 lu_gp_mem = dev->dev_alua_lu_gp_mem; 360 lu_gp_mem = dev->dev_alua_lu_gp_mem;
361 if (!(lu_gp_mem)) 361 if (!lu_gp_mem)
362 goto check_scsi_name; 362 goto check_scsi_name;
363 363
364 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 364 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
365 lu_gp = lu_gp_mem->lu_gp; 365 lu_gp = lu_gp_mem->lu_gp;
366 if (!(lu_gp)) { 366 if (!lu_gp) {
367 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 367 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
368 goto check_scsi_name; 368 goto check_scsi_name;
369 } 369 }
@@ -475,14 +475,14 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
475 have_tp = 1; 475 have_tp = 1;
476 476
477 if (cmd->data_length < (0x10 + 4)) { 477 if (cmd->data_length < (0x10 + 4)) {
478 printk(KERN_INFO "Received data_length: %u" 478 pr_debug("Received data_length: %u"
479 " too small for EVPD 0xb0\n", 479 " too small for EVPD 0xb0\n",
480 cmd->data_length); 480 cmd->data_length);
481 return -EINVAL; 481 return -EINVAL;
482 } 482 }
483 483
484 if (have_tp && cmd->data_length < (0x3c + 4)) { 484 if (have_tp && cmd->data_length < (0x3c + 4)) {
485 printk(KERN_INFO "Received data_length: %u" 485 pr_debug("Received data_length: %u"
486 " too small for TPE=1 EVPD 0xb0\n", 486 " too small for TPE=1 EVPD 0xb0\n",
487 cmd->data_length); 487 cmd->data_length);
488 have_tp = 0; 488 have_tp = 0;
@@ -491,6 +491,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
491 buf[0] = dev->transport->get_device_type(dev); 491 buf[0] = dev->transport->get_device_type(dev);
492 buf[3] = have_tp ? 0x3c : 0x10; 492 buf[3] = have_tp ? 0x3c : 0x10;
493 493
494 /* Set WSNZ to 1 */
495 buf[4] = 0x01;
496
494 /* 497 /*
495 * Set OPTIMAL TRANSFER LENGTH GRANULARITY 498 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
496 */ 499 */
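
The three added lines implement the "Set WSNZ=1 in block limits VPD" item from the changelog: byte 4, bit 0 of the Block Limits VPD page (0xb0) is the WSNZ (WRITE SAME NON-ZERO) flag in SBC-3, and setting it tells initiators that a WRITE SAME with NUMBER OF LOGICAL BLOCKS = 0 will be rejected rather than interpreted as "write to the end of the medium". A sketch of the matching initiator-side test, with a hypothetical buffer name:

	#include <stdbool.h>
	#include <stdint.h>

	/* vpd_b0 holds an INQUIRY EVPD=1 page 0xb0 response */
	static bool target_rejects_zero_block_write_same(const uint8_t *vpd_b0)
	{
		return vpd_b0[4] & 0x01;	/* WSNZ: byte 4, bit 0 (SBC-3) */
	}
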
@@ -667,7 +670,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
667 * payload length left for the next outgoing EVPD metadata 670 * payload length left for the next outgoing EVPD metadata
668 */ 671 */
669 if (cmd->data_length < 4) { 672 if (cmd->data_length < 4) {
670 printk(KERN_ERR "SCSI Inquiry payload length: %u" 673 pr_err("SCSI Inquiry payload length: %u"
671 " too small for EVPD=1\n", cmd->data_length); 674 " too small for EVPD=1\n", cmd->data_length);
672 return -EINVAL; 675 return -EINVAL;
673 } 676 }
@@ -685,7 +688,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
685 } 688 }
686 689
687 transport_kunmap_first_data_page(cmd); 690 transport_kunmap_first_data_page(cmd);
688 printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); 691 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
689 return -EINVAL; 692 return -EINVAL;
690} 693}
691 694
@@ -891,7 +894,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
891 length += target_modesense_control(dev, &buf[offset+length]); 894 length += target_modesense_control(dev, &buf[offset+length]);
892 break; 895 break;
893 default: 896 default:
894 printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n", 897 pr_err("Got Unknown Mode Page: 0x%02x\n",
895 cdb[2] & 0x3f); 898 cdb[2] & 0x3f);
896 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; 899 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
897 } 900 }
@@ -947,14 +950,14 @@ target_emulate_request_sense(struct se_cmd *cmd)
947 int err = 0; 950 int err = 0;
948 951
949 if (cdb[1] & 0x01) { 952 if (cdb[1] & 0x01) {
950 printk(KERN_ERR "REQUEST_SENSE description emulation not" 953 pr_err("REQUEST_SENSE description emulation not"
951 " supported\n"); 954 " supported\n");
952 return PYX_TRANSPORT_INVALID_CDB_FIELD; 955 return PYX_TRANSPORT_INVALID_CDB_FIELD;
953 } 956 }
954 957
955 buf = transport_kmap_first_data_page(cmd); 958 buf = transport_kmap_first_data_page(cmd);
956 959
957 if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) { 960 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
958 /* 961 /*
959 * CURRENT ERROR, UNIT ATTENTION 962 * CURRENT ERROR, UNIT ATTENTION
960 */ 963 */
@@ -1028,18 +1031,18 @@ target_emulate_unmap(struct se_task *task)
1028 buf = transport_kmap_first_data_page(cmd); 1031 buf = transport_kmap_first_data_page(cmd);
1029 1032
1030 ptr = &buf[offset]; 1033 ptr = &buf[offset];
1031 printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" 1034 pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
1032 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); 1035 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
1033 1036
1034 while (size) { 1037 while (size) {
1035 lba = get_unaligned_be64(&ptr[0]); 1038 lba = get_unaligned_be64(&ptr[0]);
1036 range = get_unaligned_be32(&ptr[8]); 1039 range = get_unaligned_be32(&ptr[8]);
1037 printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n", 1040 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
1038 (unsigned long long)lba, range); 1041 (unsigned long long)lba, range);
1039 1042
1040 ret = dev->transport->do_discard(dev, lba, range); 1043 ret = dev->transport->do_discard(dev, lba, range);
1041 if (ret < 0) { 1044 if (ret < 0) {
1042 printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", 1045 pr_err("blkdev_issue_discard() failed: %d\n",
1043 ret); 1046 ret);
1044 goto err; 1047 goto err;
1045 } 1048 }
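
For context, the loop above walks UNMAP block descriptors: after the 8-byte parameter-list header, each 16-byte descriptor carries an 8-byte starting LBA, a 4-byte block count, and 4 reserved bytes. A reduced sketch of that walk, assuming this era's target core headers and do_discard() backend hook (the function name and exact types here are illustrative):

	#include <asm/unaligned.h>
	#include <target/target_core_base.h>

	static int walk_unmap_descriptors(struct se_device *dev,
					  unsigned char *ptr, unsigned short size)
	{
		while (size >= 16) {
			sector_t lba = get_unaligned_be64(&ptr[0]);
			u32 range = get_unaligned_be32(&ptr[8]);
			int ret = dev->transport->do_discard(dev, lba, range);

			if (ret < 0)
				return ret;
			ptr += 16;	/* LBA(8) + count(4) + reserved(4) */
			size -= 16;
		}
		return 0;
	}
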
@@ -1084,12 +1087,12 @@ target_emulate_write_same(struct se_task *task, int write_same32)
1084 else 1087 else
1085 range = (dev->transport->get_blocks(dev) - lba); 1088 range = (dev->transport->get_blocks(dev) - lba);
1086 1089
1087 printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", 1090 pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
1088 (unsigned long long)lba, (unsigned long long)range); 1091 (unsigned long long)lba, (unsigned long long)range);
1089 1092
1090 ret = dev->transport->do_discard(dev, lba, range); 1093 ret = dev->transport->do_discard(dev, lba, range);
1091 if (ret < 0) { 1094 if (ret < 0) {
1092 printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); 1095 pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
1093 return ret; 1096 return ret;
1094 } 1097 }
1095 1098
@@ -1125,7 +1128,7 @@ transport_emulate_control_cdb(struct se_task *task)
1125 ret = target_emulate_readcapacity_16(cmd); 1128 ret = target_emulate_readcapacity_16(cmd);
1126 break; 1129 break;
1127 default: 1130 default:
1128 printk(KERN_ERR "Unsupported SA: 0x%02x\n", 1131 pr_err("Unsupported SA: 0x%02x\n",
1129 cmd->t_task_cdb[1] & 0x1f); 1132 cmd->t_task_cdb[1] & 0x1f);
1130 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1133 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1131 } 1134 }
@@ -1135,7 +1138,7 @@ transport_emulate_control_cdb(struct se_task *task)
1135 break; 1138 break;
1136 case UNMAP: 1139 case UNMAP:
1137 if (!dev->transport->do_discard) { 1140 if (!dev->transport->do_discard) {
1138 printk(KERN_ERR "UNMAP emulation not supported for: %s\n", 1141 pr_err("UNMAP emulation not supported for: %s\n",
1139 dev->transport->name); 1142 dev->transport->name);
1140 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1143 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1141 } 1144 }
@@ -1143,7 +1146,7 @@ transport_emulate_control_cdb(struct se_task *task)
1143 break; 1146 break;
1144 case WRITE_SAME_16: 1147 case WRITE_SAME_16:
1145 if (!dev->transport->do_discard) { 1148 if (!dev->transport->do_discard) {
1146 printk(KERN_ERR "WRITE_SAME_16 emulation not supported" 1149 pr_err("WRITE_SAME_16 emulation not supported"
1147 " for: %s\n", dev->transport->name); 1150 " for: %s\n", dev->transport->name);
1148 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1151 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1149 } 1152 }
@@ -1155,7 +1158,7 @@ transport_emulate_control_cdb(struct se_task *task)
1155 switch (service_action) { 1158 switch (service_action) {
1156 case WRITE_SAME_32: 1159 case WRITE_SAME_32:
1157 if (!dev->transport->do_discard) { 1160 if (!dev->transport->do_discard) {
1158 printk(KERN_ERR "WRITE_SAME_32 SA emulation not" 1161 pr_err("WRITE_SAME_32 SA emulation not"
1159 " supported for: %s\n", 1162 " supported for: %s\n",
1160 dev->transport->name); 1163 dev->transport->name);
1161 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1164 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
@@ -1163,7 +1166,7 @@ transport_emulate_control_cdb(struct se_task *task)
1163 ret = target_emulate_write_same(task, 1); 1166 ret = target_emulate_write_same(task, 1);
1164 break; 1167 break;
1165 default: 1168 default:
1166 printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:" 1169 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
1167 " 0x%02x\n", service_action); 1170 " 0x%02x\n", service_action);
1168 break; 1171 break;
1169 } 1172 }
@@ -1171,8 +1174,7 @@ transport_emulate_control_cdb(struct se_task *task)
1171 case SYNCHRONIZE_CACHE: 1174 case SYNCHRONIZE_CACHE:
1172 case 0x91: /* SYNCHRONIZE_CACHE_16: */ 1175 case 0x91: /* SYNCHRONIZE_CACHE_16: */
1173 if (!dev->transport->do_sync_cache) { 1176 if (!dev->transport->do_sync_cache) {
1174 printk(KERN_ERR 1177 pr_err("SYNCHRONIZE_CACHE emulation not supported"
1175 "SYNCHRONIZE_CACHE emulation not supported"
1176 " for: %s\n", dev->transport->name); 1178 " for: %s\n", dev->transport->name);
1177 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1179 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1178 } 1180 }
@@ -1189,7 +1191,7 @@ transport_emulate_control_cdb(struct se_task *task)
1189 case WRITE_FILEMARKS: 1191 case WRITE_FILEMARKS:
1190 break; 1192 break;
1191 default: 1193 default:
1192 printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", 1194 pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
1193 cmd->t_task_cdb[0], dev->transport->name); 1195 cmd->t_task_cdb[0], dev->transport->name);
1194 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1196 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1195 } 1197 }
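
Every discard-capable opcode in this dispatcher (UNMAP, WRITE_SAME_16, WRITE_SAME_32) bails out with PYX_TRANSPORT_UNKNOWN_SAM_OPCODE unless the backend supplies do_discard(). For a block-device backend of this era that hook is essentially a thin wrapper around blkdev_issue_discard(); a sketch, with the container type and field names invented for illustration:

	#include <linux/blkdev.h>

	struct example_blk_dev {
		struct block_device *bd;	/* hypothetical backend state */
	};

	static int example_do_discard(struct se_device *dev, sector_t lba, u32 range)
	{
		struct example_blk_dev *ebd = dev->dev_ptr;

		return blkdev_issue_discard(ebd->bd, lba, range, GFP_KERNEL, 0);
	}
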
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 6b00810b8dcb..e56c39daeec6 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -104,12 +104,12 @@ static struct target_fabric_configfs *target_core_get_fabric(
104{ 104{
105 struct target_fabric_configfs *tf; 105 struct target_fabric_configfs *tf;
106 106
107 if (!(name)) 107 if (!name)
108 return NULL; 108 return NULL;
109 109
110 mutex_lock(&g_tf_lock); 110 mutex_lock(&g_tf_lock);
111 list_for_each_entry(tf, &g_tf_list, tf_list) { 111 list_for_each_entry(tf, &g_tf_list, tf_list) {
112 if (!(strcmp(tf->tf_name, name))) { 112 if (!strcmp(tf->tf_name, name)) {
113 atomic_inc(&tf->tf_access_cnt); 113 atomic_inc(&tf->tf_access_cnt);
114 mutex_unlock(&g_tf_lock); 114 mutex_unlock(&g_tf_lock);
115 return tf; 115 return tf;
@@ -130,7 +130,7 @@ static struct config_group *target_core_register_fabric(
130 struct target_fabric_configfs *tf; 130 struct target_fabric_configfs *tf;
131 int ret; 131 int ret;
132 132
133 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:" 133 pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
134 " %s\n", group, name); 134 " %s\n", group, name);
135 /* 135 /*
136 * Ensure that TCM subsystem plugins are loaded at this point for 136 * Ensure that TCM subsystem plugins are loaded at this point for
@@ -150,7 +150,7 @@ static struct config_group *target_core_register_fabric(
150 * registered, but simply provids auto loading logic for modules with 150 * registered, but simply provids auto loading logic for modules with
151 * mkdir(2) system calls with known TCM fabric modules. 151 * mkdir(2) system calls with known TCM fabric modules.
152 */ 152 */
153 if (!(strncmp(name, "iscsi", 5))) { 153 if (!strncmp(name, "iscsi", 5)) {
154 /* 154 /*
155 * Automatically load the LIO Target fabric module when the 155 * Automatically load the LIO Target fabric module when the
156 * following is called: 156 * following is called:
@@ -159,11 +159,11 @@ static struct config_group *target_core_register_fabric(
159 */ 159 */
160 ret = request_module("iscsi_target_mod"); 160 ret = request_module("iscsi_target_mod");
161 if (ret < 0) { 161 if (ret < 0) {
162 printk(KERN_ERR "request_module() failed for" 162 pr_err("request_module() failed for"
163 " iscsi_target_mod.ko: %d\n", ret); 163 " iscsi_target_mod.ko: %d\n", ret);
164 return ERR_PTR(-EINVAL); 164 return ERR_PTR(-EINVAL);
165 } 165 }
166 } else if (!(strncmp(name, "loopback", 8))) { 166 } else if (!strncmp(name, "loopback", 8)) {
167 /* 167 /*
168 * Automatically load the tcm_loop fabric module when the 168 * Automatically load the tcm_loop fabric module when the
169 * following is called: 169 * following is called:
@@ -172,25 +172,25 @@ static struct config_group *target_core_register_fabric(
172 */ 172 */
173 ret = request_module("tcm_loop"); 173 ret = request_module("tcm_loop");
174 if (ret < 0) { 174 if (ret < 0) {
175 printk(KERN_ERR "request_module() failed for" 175 pr_err("request_module() failed for"
176 " tcm_loop.ko: %d\n", ret); 176 " tcm_loop.ko: %d\n", ret);
177 return ERR_PTR(-EINVAL); 177 return ERR_PTR(-EINVAL);
178 } 178 }
179 } 179 }
180 180
181 tf = target_core_get_fabric(name); 181 tf = target_core_get_fabric(name);
182 if (!(tf)) { 182 if (!tf) {
183 printk(KERN_ERR "target_core_get_fabric() failed for %s\n", 183 pr_err("target_core_get_fabric() failed for %s\n",
184 name); 184 name);
185 return ERR_PTR(-EINVAL); 185 return ERR_PTR(-EINVAL);
186 } 186 }
187 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:" 187 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
188 " %s\n", tf->tf_name); 188 " %s\n", tf->tf_name);
189 /* 189 /*
190 * On a successful target_core_get_fabric() look, the returned 190 * On a successful target_core_get_fabric() look, the returned
191 * struct target_fabric_configfs *tf will contain a usage reference. 191 * struct target_fabric_configfs *tf will contain a usage reference.
192 */ 192 */
193 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", 193 pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
194 &TF_CIT_TMPL(tf)->tfc_wwn_cit); 194 &TF_CIT_TMPL(tf)->tfc_wwn_cit);
195 195
196 tf->tf_group.default_groups = tf->tf_default_groups; 196 tf->tf_group.default_groups = tf->tf_default_groups;
@@ -202,14 +202,14 @@ static struct config_group *target_core_register_fabric(
202 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", 202 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
203 &TF_CIT_TMPL(tf)->tfc_discovery_cit); 203 &TF_CIT_TMPL(tf)->tfc_discovery_cit);
204 204
205 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" 205 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
206 " %s\n", tf->tf_group.cg_item.ci_name); 206 " %s\n", tf->tf_group.cg_item.ci_name);
207 /* 207 /*
208 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() 208 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
209 */ 209 */
210 tf->tf_ops.tf_subsys = tf->tf_subsys; 210 tf->tf_ops.tf_subsys = tf->tf_subsys;
211 tf->tf_fabric = &tf->tf_group.cg_item; 211 tf->tf_fabric = &tf->tf_group.cg_item;
212 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" 212 pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
213 " for %s\n", name); 213 " for %s\n", name);
214 214
215 return &tf->tf_group; 215 return &tf->tf_group;
@@ -228,18 +228,18 @@ static void target_core_deregister_fabric(
228 struct config_item *df_item; 228 struct config_item *df_item;
229 int i; 229 int i;
230 230
231 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" 231 pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
232 " tf list\n", config_item_name(item)); 232 " tf list\n", config_item_name(item));
233 233
234 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:" 234 pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
235 " %s\n", tf->tf_name); 235 " %s\n", tf->tf_name);
236 atomic_dec(&tf->tf_access_cnt); 236 atomic_dec(&tf->tf_access_cnt);
237 237
238 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing" 238 pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
239 " tf->tf_fabric for %s\n", tf->tf_name); 239 " tf->tf_fabric for %s\n", tf->tf_name);
240 tf->tf_fabric = NULL; 240 tf->tf_fabric = NULL;
241 241
242 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci" 242 pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
243 " %s\n", config_item_name(item)); 243 " %s\n", config_item_name(item));
244 244
245 tf_group = &tf->tf_group; 245 tf_group = &tf->tf_group;
@@ -307,17 +307,17 @@ struct target_fabric_configfs *target_fabric_configfs_init(
307 struct target_fabric_configfs *tf; 307 struct target_fabric_configfs *tf;
308 308
309 if (!(name)) { 309 if (!(name)) {
310 printk(KERN_ERR "Unable to locate passed fabric name\n"); 310 pr_err("Unable to locate passed fabric name\n");
311 return ERR_PTR(-EINVAL); 311 return ERR_PTR(-EINVAL);
312 } 312 }
313 if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { 313 if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
314 printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" 314 pr_err("Passed name: %s exceeds TARGET_FABRIC"
315 "_NAME_SIZE\n", name); 315 "_NAME_SIZE\n", name);
316 return ERR_PTR(-EINVAL); 316 return ERR_PTR(-EINVAL);
317 } 317 }
318 318
319 tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); 319 tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
320 if (!(tf)) 320 if (!tf)
321 return ERR_PTR(-ENOMEM); 321 return ERR_PTR(-ENOMEM);
322 322
323 INIT_LIST_HEAD(&tf->tf_list); 323 INIT_LIST_HEAD(&tf->tf_list);
@@ -336,9 +336,9 @@ struct target_fabric_configfs *target_fabric_configfs_init(
336 list_add_tail(&tf->tf_list, &g_tf_list); 336 list_add_tail(&tf->tf_list, &g_tf_list);
337 mutex_unlock(&g_tf_lock); 337 mutex_unlock(&g_tf_lock);
338 338
339 printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" 339 pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
340 ">>>>>>>>>>>>>>\n"); 340 ">>>>>>>>>>>>>>\n");
341 printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for" 341 pr_debug("Initialized struct target_fabric_configfs: %p for"
342 " %s\n", tf, tf->tf_name); 342 " %s\n", tf, tf->tf_name);
343 return tf; 343 return tf;
344} 344}
@@ -367,132 +367,132 @@ static int target_fabric_tf_ops_check(
367{ 367{
368 struct target_core_fabric_ops *tfo = &tf->tf_ops; 368 struct target_core_fabric_ops *tfo = &tf->tf_ops;
369 369
370 if (!(tfo->get_fabric_name)) { 370 if (!tfo->get_fabric_name) {
371 printk(KERN_ERR "Missing tfo->get_fabric_name()\n"); 371 pr_err("Missing tfo->get_fabric_name()\n");
372 return -EINVAL; 372 return -EINVAL;
373 } 373 }
374 if (!(tfo->get_fabric_proto_ident)) { 374 if (!tfo->get_fabric_proto_ident) {
375 printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n"); 375 pr_err("Missing tfo->get_fabric_proto_ident()\n");
376 return -EINVAL; 376 return -EINVAL;
377 } 377 }
378 if (!(tfo->tpg_get_wwn)) { 378 if (!tfo->tpg_get_wwn) {
379 printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n"); 379 pr_err("Missing tfo->tpg_get_wwn()\n");
380 return -EINVAL; 380 return -EINVAL;
381 } 381 }
382 if (!(tfo->tpg_get_tag)) { 382 if (!tfo->tpg_get_tag) {
383 printk(KERN_ERR "Missing tfo->tpg_get_tag()\n"); 383 pr_err("Missing tfo->tpg_get_tag()\n");
384 return -EINVAL; 384 return -EINVAL;
385 } 385 }
386 if (!(tfo->tpg_get_default_depth)) { 386 if (!tfo->tpg_get_default_depth) {
387 printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n"); 387 pr_err("Missing tfo->tpg_get_default_depth()\n");
388 return -EINVAL; 388 return -EINVAL;
389 } 389 }
390 if (!(tfo->tpg_get_pr_transport_id)) { 390 if (!tfo->tpg_get_pr_transport_id) {
391 printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n"); 391 pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
392 return -EINVAL; 392 return -EINVAL;
393 } 393 }
394 if (!(tfo->tpg_get_pr_transport_id_len)) { 394 if (!tfo->tpg_get_pr_transport_id_len) {
395 printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n"); 395 pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
396 return -EINVAL; 396 return -EINVAL;
397 } 397 }
398 if (!(tfo->tpg_check_demo_mode)) { 398 if (!tfo->tpg_check_demo_mode) {
399 printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n"); 399 pr_err("Missing tfo->tpg_check_demo_mode()\n");
400 return -EINVAL; 400 return -EINVAL;
401 } 401 }
402 if (!(tfo->tpg_check_demo_mode_cache)) { 402 if (!tfo->tpg_check_demo_mode_cache) {
403 printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n"); 403 pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
404 return -EINVAL; 404 return -EINVAL;
405 } 405 }
406 if (!(tfo->tpg_check_demo_mode_write_protect)) { 406 if (!tfo->tpg_check_demo_mode_write_protect) {
407 printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n"); 407 pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
408 return -EINVAL; 408 return -EINVAL;
409 } 409 }
410 if (!(tfo->tpg_check_prod_mode_write_protect)) { 410 if (!tfo->tpg_check_prod_mode_write_protect) {
411 printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n"); 411 pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
412 return -EINVAL; 412 return -EINVAL;
413 } 413 }
414 if (!(tfo->tpg_alloc_fabric_acl)) { 414 if (!tfo->tpg_alloc_fabric_acl) {
415 printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n"); 415 pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
416 return -EINVAL; 416 return -EINVAL;
417 } 417 }
418 if (!(tfo->tpg_release_fabric_acl)) { 418 if (!tfo->tpg_release_fabric_acl) {
419 printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n"); 419 pr_err("Missing tfo->tpg_release_fabric_acl()\n");
420 return -EINVAL; 420 return -EINVAL;
421 } 421 }
422 if (!(tfo->tpg_get_inst_index)) { 422 if (!tfo->tpg_get_inst_index) {
423 printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n"); 423 pr_err("Missing tfo->tpg_get_inst_index()\n");
424 return -EINVAL; 424 return -EINVAL;
425 } 425 }
426 if (!tfo->release_cmd) { 426 if (!tfo->release_cmd) {
427 printk(KERN_ERR "Missing tfo->release_cmd()\n"); 427 pr_err("Missing tfo->release_cmd()\n");
428 return -EINVAL; 428 return -EINVAL;
429 } 429 }
430 if (!(tfo->shutdown_session)) { 430 if (!tfo->shutdown_session) {
431 printk(KERN_ERR "Missing tfo->shutdown_session()\n"); 431 pr_err("Missing tfo->shutdown_session()\n");
432 return -EINVAL; 432 return -EINVAL;
433 } 433 }
434 if (!(tfo->close_session)) { 434 if (!tfo->close_session) {
435 printk(KERN_ERR "Missing tfo->close_session()\n"); 435 pr_err("Missing tfo->close_session()\n");
436 return -EINVAL; 436 return -EINVAL;
437 } 437 }
438 if (!(tfo->stop_session)) { 438 if (!tfo->stop_session) {
439 printk(KERN_ERR "Missing tfo->stop_session()\n"); 439 pr_err("Missing tfo->stop_session()\n");
440 return -EINVAL; 440 return -EINVAL;
441 } 441 }
442 if (!(tfo->fall_back_to_erl0)) { 442 if (!tfo->fall_back_to_erl0) {
443 printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n"); 443 pr_err("Missing tfo->fall_back_to_erl0()\n");
444 return -EINVAL; 444 return -EINVAL;
445 } 445 }
446 if (!(tfo->sess_logged_in)) { 446 if (!tfo->sess_logged_in) {
447 printk(KERN_ERR "Missing tfo->sess_logged_in()\n"); 447 pr_err("Missing tfo->sess_logged_in()\n");
448 return -EINVAL; 448 return -EINVAL;
449 } 449 }
450 if (!(tfo->sess_get_index)) { 450 if (!tfo->sess_get_index) {
451 printk(KERN_ERR "Missing tfo->sess_get_index()\n"); 451 pr_err("Missing tfo->sess_get_index()\n");
452 return -EINVAL; 452 return -EINVAL;
453 } 453 }
454 if (!(tfo->write_pending)) { 454 if (!tfo->write_pending) {
455 printk(KERN_ERR "Missing tfo->write_pending()\n"); 455 pr_err("Missing tfo->write_pending()\n");
456 return -EINVAL; 456 return -EINVAL;
457 } 457 }
458 if (!(tfo->write_pending_status)) { 458 if (!tfo->write_pending_status) {
459 printk(KERN_ERR "Missing tfo->write_pending_status()\n"); 459 pr_err("Missing tfo->write_pending_status()\n");
460 return -EINVAL; 460 return -EINVAL;
461 } 461 }
462 if (!(tfo->set_default_node_attributes)) { 462 if (!tfo->set_default_node_attributes) {
463 printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n"); 463 pr_err("Missing tfo->set_default_node_attributes()\n");
464 return -EINVAL; 464 return -EINVAL;
465 } 465 }
466 if (!(tfo->get_task_tag)) { 466 if (!tfo->get_task_tag) {
467 printk(KERN_ERR "Missing tfo->get_task_tag()\n"); 467 pr_err("Missing tfo->get_task_tag()\n");
468 return -EINVAL; 468 return -EINVAL;
469 } 469 }
470 if (!(tfo->get_cmd_state)) { 470 if (!tfo->get_cmd_state) {
471 printk(KERN_ERR "Missing tfo->get_cmd_state()\n"); 471 pr_err("Missing tfo->get_cmd_state()\n");
472 return -EINVAL; 472 return -EINVAL;
473 } 473 }
474 if (!(tfo->queue_data_in)) { 474 if (!tfo->queue_data_in) {
475 printk(KERN_ERR "Missing tfo->queue_data_in()\n"); 475 pr_err("Missing tfo->queue_data_in()\n");
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 if (!(tfo->queue_status)) { 478 if (!tfo->queue_status) {
479 printk(KERN_ERR "Missing tfo->queue_status()\n"); 479 pr_err("Missing tfo->queue_status()\n");
480 return -EINVAL; 480 return -EINVAL;
481 } 481 }
482 if (!(tfo->queue_tm_rsp)) { 482 if (!tfo->queue_tm_rsp) {
483 printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n"); 483 pr_err("Missing tfo->queue_tm_rsp()\n");
484 return -EINVAL; 484 return -EINVAL;
485 } 485 }
486 if (!(tfo->set_fabric_sense_len)) { 486 if (!tfo->set_fabric_sense_len) {
487 printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n"); 487 pr_err("Missing tfo->set_fabric_sense_len()\n");
488 return -EINVAL; 488 return -EINVAL;
489 } 489 }
490 if (!(tfo->get_fabric_sense_len)) { 490 if (!tfo->get_fabric_sense_len) {
491 printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n"); 491 pr_err("Missing tfo->get_fabric_sense_len()\n");
492 return -EINVAL; 492 return -EINVAL;
493 } 493 }
494 if (!(tfo->is_state_remove)) { 494 if (!tfo->is_state_remove) {
495 printk(KERN_ERR "Missing tfo->is_state_remove()\n"); 495 pr_err("Missing tfo->is_state_remove()\n");
496 return -EINVAL; 496 return -EINVAL;
497 } 497 }
498 /* 498 /*
@@ -500,20 +500,20 @@ static int target_fabric_tf_ops_check(
500 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in 500 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
501 * target_core_fabric_configfs.c WWN+TPG group context code. 501 * target_core_fabric_configfs.c WWN+TPG group context code.
502 */ 502 */
503 if (!(tfo->fabric_make_wwn)) { 503 if (!tfo->fabric_make_wwn) {
504 printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n"); 504 pr_err("Missing tfo->fabric_make_wwn()\n");
505 return -EINVAL; 505 return -EINVAL;
506 } 506 }
507 if (!(tfo->fabric_drop_wwn)) { 507 if (!tfo->fabric_drop_wwn) {
508 printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n"); 508 pr_err("Missing tfo->fabric_drop_wwn()\n");
509 return -EINVAL; 509 return -EINVAL;
510 } 510 }
511 if (!(tfo->fabric_make_tpg)) { 511 if (!tfo->fabric_make_tpg) {
512 printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n"); 512 pr_err("Missing tfo->fabric_make_tpg()\n");
513 return -EINVAL; 513 return -EINVAL;
514 } 514 }
515 if (!(tfo->fabric_drop_tpg)) { 515 if (!tfo->fabric_drop_tpg) {
516 printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n"); 516 pr_err("Missing tfo->fabric_drop_tpg()\n");
517 return -EINVAL; 517 return -EINVAL;
518 } 518 }
519 519
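
target_fabric_tf_ops_check() is the gatekeeper for all of the checks above: registration fails unless every mandatory callback is non-NULL. A sketch of the registration sequence a v3.x fabric module follows, with a hypothetical ops table name; the fabric must fill in every callback checked above before registering:

	#include <linux/err.h>
	#include <linux/module.h>

	/* my_fabric_ops: a fully populated struct target_core_fabric_ops */
	static struct target_core_fabric_ops my_fabric_ops;

	static int __init my_fabric_register(void)
	{
		struct target_fabric_configfs *fabric;
		int ret;

		fabric = target_fabric_configfs_init(THIS_MODULE, "myfabric");
		if (IS_ERR(fabric))
			return PTR_ERR(fabric);

		/* every callback validated by target_fabric_tf_ops_check() */
		fabric->tf_ops = my_fabric_ops;

		ret = target_fabric_configfs_register(fabric);
		if (ret < 0) {
			target_fabric_configfs_free(fabric);
			return ret;
		}
		return 0;
	}
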
@@ -533,13 +533,13 @@ int target_fabric_configfs_register(
533{ 533{
534 int ret; 534 int ret;
535 535
536 if (!(tf)) { 536 if (!tf) {
537 printk(KERN_ERR "Unable to locate target_fabric_configfs" 537 pr_err("Unable to locate target_fabric_configfs"
538 " pointer\n"); 538 " pointer\n");
539 return -EINVAL; 539 return -EINVAL;
540 } 540 }
541 if (!(tf->tf_subsys)) { 541 if (!tf->tf_subsys) {
542 printk(KERN_ERR "Unable to target struct config_subsystem" 542 pr_err("Unable to target struct config_subsystem"
543 " pointer\n"); 543 " pointer\n");
544 return -EINVAL; 544 return -EINVAL;
545 } 545 }
@@ -547,7 +547,7 @@ int target_fabric_configfs_register(
547 if (ret < 0) 547 if (ret < 0)
548 return ret; 548 return ret;
549 549
550 printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" 550 pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
551 ">>>>>>>>>>\n"); 551 ">>>>>>>>>>\n");
552 return 0; 552 return 0;
553} 553}
@@ -558,36 +558,36 @@ void target_fabric_configfs_deregister(
558{ 558{
559 struct configfs_subsystem *su; 559 struct configfs_subsystem *su;
560 560
561 if (!(tf)) { 561 if (!tf) {
562 printk(KERN_ERR "Unable to locate passed target_fabric_" 562 pr_err("Unable to locate passed target_fabric_"
563 "configfs\n"); 563 "configfs\n");
564 return; 564 return;
565 } 565 }
566 su = tf->tf_subsys; 566 su = tf->tf_subsys;
567 if (!(su)) { 567 if (!su) {
568 printk(KERN_ERR "Unable to locate passed tf->tf_subsys" 568 pr_err("Unable to locate passed tf->tf_subsys"
569 " pointer\n"); 569 " pointer\n");
570 return; 570 return;
571 } 571 }
572 printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" 572 pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
573 ">>>>>>>>>>>>\n"); 573 ">>>>>>>>>>>>\n");
574 mutex_lock(&g_tf_lock); 574 mutex_lock(&g_tf_lock);
575 if (atomic_read(&tf->tf_access_cnt)) { 575 if (atomic_read(&tf->tf_access_cnt)) {
576 mutex_unlock(&g_tf_lock); 576 mutex_unlock(&g_tf_lock);
577 printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n", 577 pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
578 tf->tf_name); 578 tf->tf_name);
579 BUG(); 579 BUG();
580 } 580 }
581 list_del(&tf->tf_list); 581 list_del(&tf->tf_list);
582 mutex_unlock(&g_tf_lock); 582 mutex_unlock(&g_tf_lock);
583 583
584 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" 584 pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
585 " %s\n", tf->tf_name); 585 " %s\n", tf->tf_name);
586 tf->tf_module = NULL; 586 tf->tf_module = NULL;
587 tf->tf_subsys = NULL; 587 tf->tf_subsys = NULL;
588 kfree(tf); 588 kfree(tf);
589 589
590 printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" 590 pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
591 ">>>>>\n"); 591 ">>>>>\n");
592} 592}
593EXPORT_SYMBOL(target_fabric_configfs_deregister); 593EXPORT_SYMBOL(target_fabric_configfs_deregister);
@@ -609,7 +609,7 @@ static ssize_t target_core_dev_show_attr_##_name( \
609 \ 609 \
610 spin_lock(&se_dev->se_dev_lock); \ 610 spin_lock(&se_dev->se_dev_lock); \
611 dev = se_dev->se_dev_ptr; \ 611 dev = se_dev->se_dev_ptr; \
612 if (!(dev)) { \ 612 if (!dev) { \
613 spin_unlock(&se_dev->se_dev_lock); \ 613 spin_unlock(&se_dev->se_dev_lock); \
614 return -ENODEV; \ 614 return -ENODEV; \
615 } \ 615 } \
@@ -633,14 +633,14 @@ static ssize_t target_core_dev_store_attr_##_name( \
633 \ 633 \
634 spin_lock(&se_dev->se_dev_lock); \ 634 spin_lock(&se_dev->se_dev_lock); \
635 dev = se_dev->se_dev_ptr; \ 635 dev = se_dev->se_dev_ptr; \
636 if (!(dev)) { \ 636 if (!dev) { \
637 spin_unlock(&se_dev->se_dev_lock); \ 637 spin_unlock(&se_dev->se_dev_lock); \
638 return -ENODEV; \ 638 return -ENODEV; \
639 } \ 639 } \
640 ret = strict_strtoul(page, 0, &val); \ 640 ret = strict_strtoul(page, 0, &val); \
641 if (ret < 0) { \ 641 if (ret < 0) { \
642 spin_unlock(&se_dev->se_dev_lock); \ 642 spin_unlock(&se_dev->se_dev_lock); \
643 printk(KERN_ERR "strict_strtoul() failed with" \ 643 pr_err("strict_strtoul() failed with" \
644 " ret: %d\n", ret); \ 644 " ret: %d\n", ret); \
645 return -EINVAL; \ 645 return -EINVAL; \
646 } \ 646 } \
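
The store macro keeps strict_strtoul(), which returns 0 on success and a negative errno for malformed or overflowing input, so the ret < 0 guard is the whole validation. (Later kernels spell this family kstrtoul() and eventually removed strict_strtoul() outright.) The pattern the macro expands to, shown in isolation with the locking elided:

	static ssize_t example_store_attr(struct se_subsystem_dev *se_dev,
					  const char *page, size_t count)
	{
		unsigned long val;
		int ret;

		ret = strict_strtoul(page, 0, &val);	/* kstrtoul() in later trees */
		if (ret < 0) {
			pr_err("strict_strtoul() failed with ret: %d\n", ret);
			return -EINVAL;
		}
		/* ...range-check val and store it... */
		return count;
	}
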
@@ -806,7 +806,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
806 struct se_device *dev; 806 struct se_device *dev;
807 807
808 dev = se_dev->se_dev_ptr; 808 dev = se_dev->se_dev_ptr;
809 if (!(dev)) 809 if (!dev)
810 return -ENODEV; 810 return -ENODEV;
811 811
812 return sprintf(page, "T10 VPD Unit Serial Number: %s\n", 812 return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
@@ -833,13 +833,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
833 * VPD Unit Serial Number that OS dependent multipath can depend on. 833 * VPD Unit Serial Number that OS dependent multipath can depend on.
834 */ 834 */
835 if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { 835 if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
836 printk(KERN_ERR "Underlying SCSI device firmware provided VPD" 836 pr_err("Underlying SCSI device firmware provided VPD"
837 " Unit Serial, ignoring request\n"); 837 " Unit Serial, ignoring request\n");
838 return -EOPNOTSUPP; 838 return -EOPNOTSUPP;
839 } 839 }
840 840
841 if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { 841 if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
842 printk(KERN_ERR "Emulated VPD Unit Serial exceeds" 842 pr_err("Emulated VPD Unit Serial exceeds"
843 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); 843 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
844 return -EOVERFLOW; 844 return -EOVERFLOW;
845 } 845 }
@@ -850,9 +850,9 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
850 * could cause negative effects. 850 * could cause negative effects.
851 */ 851 */
852 dev = su_dev->se_dev_ptr; 852 dev = su_dev->se_dev_ptr;
853 if ((dev)) { 853 if (dev) {
854 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 854 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
855 printk(KERN_ERR "Unable to set VPD Unit Serial while" 855 pr_err("Unable to set VPD Unit Serial while"
856 " active %d $FABRIC_MOD exports exist\n", 856 " active %d $FABRIC_MOD exports exist\n",
857 atomic_read(&dev->dev_export_obj.obj_access_count)); 857 atomic_read(&dev->dev_export_obj.obj_access_count));
858 return -EINVAL; 858 return -EINVAL;
@@ -870,7 +870,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
870 "%s", strstrip(buf)); 870 "%s", strstrip(buf));
871 su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; 871 su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
872 872
873 printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:" 873 pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
874 " %s\n", su_dev->t10_wwn.unit_serial); 874 " %s\n", su_dev->t10_wwn.unit_serial);
875 875
876 return count; 876 return count;
@@ -892,19 +892,19 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
892 ssize_t len = 0; 892 ssize_t len = 0;
893 893
894 dev = se_dev->se_dev_ptr; 894 dev = se_dev->se_dev_ptr;
895 if (!(dev)) 895 if (!dev)
896 return -ENODEV; 896 return -ENODEV;
897 897
898 memset(buf, 0, VPD_TMP_BUF_SIZE); 898 memset(buf, 0, VPD_TMP_BUF_SIZE);
899 899
900 spin_lock(&t10_wwn->t10_vpd_lock); 900 spin_lock(&t10_wwn->t10_vpd_lock);
901 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { 901 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
902 if (!(vpd->protocol_identifier_set)) 902 if (!vpd->protocol_identifier_set)
903 continue; 903 continue;
904 904
905 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); 905 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
906 906
907 if ((len + strlen(buf) >= PAGE_SIZE)) 907 if (len + strlen(buf) >= PAGE_SIZE)
908 break; 908 break;
909 909
910 len += sprintf(page+len, "%s", buf); 910 len += sprintf(page+len, "%s", buf);
@@ -939,7 +939,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
939 ssize_t len = 0; \ 939 ssize_t len = 0; \
940 \ 940 \
941 dev = se_dev->se_dev_ptr; \ 941 dev = se_dev->se_dev_ptr; \
942 if (!(dev)) \ 942 if (!dev) \
943 return -ENODEV; \ 943 return -ENODEV; \
944 \ 944 \
945 spin_lock(&t10_wwn->t10_vpd_lock); \ 945 spin_lock(&t10_wwn->t10_vpd_lock); \
@@ -949,19 +949,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
949 \ 949 \
950 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 950 memset(buf, 0, VPD_TMP_BUF_SIZE); \
951 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ 951 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
952 if ((len + strlen(buf) >= PAGE_SIZE)) \ 952 if (len + strlen(buf) >= PAGE_SIZE) \
953 break; \ 953 break; \
954 len += sprintf(page+len, "%s", buf); \ 954 len += sprintf(page+len, "%s", buf); \
955 \ 955 \
956 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 956 memset(buf, 0, VPD_TMP_BUF_SIZE); \
957 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ 957 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
958 if ((len + strlen(buf) >= PAGE_SIZE)) \ 958 if (len + strlen(buf) >= PAGE_SIZE) \
959 break; \ 959 break; \
960 len += sprintf(page+len, "%s", buf); \ 960 len += sprintf(page+len, "%s", buf); \
961 \ 961 \
962 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 962 memset(buf, 0, VPD_TMP_BUF_SIZE); \
963 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ 963 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
964 if ((len + strlen(buf) >= PAGE_SIZE)) \ 964 if (len + strlen(buf) >= PAGE_SIZE) \
965 break; \ 965 break; \
966 len += sprintf(page+len, "%s", buf); \ 966 len += sprintf(page+len, "%s", buf); \
967 } \ 967 } \
@@ -1070,7 +1070,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(
1070 1070
1071 spin_lock(&dev->dev_reservation_lock); 1071 spin_lock(&dev->dev_reservation_lock);
1072 pr_reg = dev->dev_pr_res_holder; 1072 pr_reg = dev->dev_pr_res_holder;
1073 if (!(pr_reg)) { 1073 if (!pr_reg) {
1074 *len += sprintf(page + *len, "No SPC-3 Reservation holder\n"); 1074 *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
1075 spin_unlock(&dev->dev_reservation_lock); 1075 spin_unlock(&dev->dev_reservation_lock);
1076 return *len; 1076 return *len;
@@ -1096,7 +1096,7 @@ static ssize_t target_core_dev_pr_show_spc2_res(
1096 1096
1097 spin_lock(&dev->dev_reservation_lock); 1097 spin_lock(&dev->dev_reservation_lock);
1098 se_nacl = dev->dev_reserved_node_acl; 1098 se_nacl = dev->dev_reserved_node_acl;
1099 if (!(se_nacl)) { 1099 if (!se_nacl) {
1100 *len += sprintf(page + *len, "No SPC-2 Reservation holder\n"); 1100 *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
1101 spin_unlock(&dev->dev_reservation_lock); 1101 spin_unlock(&dev->dev_reservation_lock);
1102 return *len; 1102 return *len;
@@ -1115,7 +1115,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(
1115{ 1115{
1116 ssize_t len = 0; 1116 ssize_t len = 0;
1117 1117
1118 if (!(su_dev->se_dev_ptr)) 1118 if (!su_dev->se_dev_ptr)
1119 return -ENODEV; 1119 return -ENODEV;
1120 1120
1121 switch (su_dev->t10_pr.res_type) { 1121 switch (su_dev->t10_pr.res_type) {
@@ -1152,7 +1152,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
1152 ssize_t len = 0; 1152 ssize_t len = 0;
1153 1153
1154 dev = su_dev->se_dev_ptr; 1154 dev = su_dev->se_dev_ptr;
1155 if (!(dev)) 1155 if (!dev)
1156 return -ENODEV; 1156 return -ENODEV;
1157 1157
1158 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1158 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1160,7 +1160,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
1160 1160
1161 spin_lock(&dev->dev_reservation_lock); 1161 spin_lock(&dev->dev_reservation_lock);
1162 pr_reg = dev->dev_pr_res_holder; 1162 pr_reg = dev->dev_pr_res_holder;
1163 if (!(pr_reg)) { 1163 if (!pr_reg) {
1164 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1164 len = sprintf(page, "No SPC-3 Reservation holder\n");
1165 spin_unlock(&dev->dev_reservation_lock); 1165 spin_unlock(&dev->dev_reservation_lock);
1166 return len; 1166 return len;
@@ -1189,7 +1189,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
1189 struct se_subsystem_dev *su_dev, 1189 struct se_subsystem_dev *su_dev,
1190 char *page) 1190 char *page)
1191{ 1191{
1192 if (!(su_dev->se_dev_ptr)) 1192 if (!su_dev->se_dev_ptr)
1193 return -ENODEV; 1193 return -ENODEV;
1194 1194
1195 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1195 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1216,7 +1216,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1216 ssize_t len = 0; 1216 ssize_t len = 0;
1217 1217
1218 dev = su_dev->se_dev_ptr; 1218 dev = su_dev->se_dev_ptr;
1219 if (!(dev)) 1219 if (!dev)
1220 return -ENODEV; 1220 return -ENODEV;
1221 1221
1222 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1222 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1224,7 +1224,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1224 1224
1225 spin_lock(&dev->dev_reservation_lock); 1225 spin_lock(&dev->dev_reservation_lock);
1226 pr_reg = dev->dev_pr_res_holder; 1226 pr_reg = dev->dev_pr_res_holder;
1227 if (!(pr_reg)) { 1227 if (!pr_reg) {
1228 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1228 len = sprintf(page, "No SPC-3 Reservation holder\n");
1229 spin_unlock(&dev->dev_reservation_lock); 1229 spin_unlock(&dev->dev_reservation_lock);
1230 return len; 1230 return len;
@@ -1263,7 +1263,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1263 ssize_t len = 0; 1263 ssize_t len = 0;
1264 int reg_count = 0, prf_isid; 1264 int reg_count = 0, prf_isid;
1265 1265
1266 if (!(su_dev->se_dev_ptr)) 1266 if (!su_dev->se_dev_ptr)
1267 return -ENODEV; 1267 return -ENODEV;
1268 1268
1269 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1269 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1286,7 +1286,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1286 &i_buf[0] : "", pr_reg->pr_res_key, 1286 &i_buf[0] : "", pr_reg->pr_res_key,
1287 pr_reg->pr_res_generation); 1287 pr_reg->pr_res_generation);
1288 1288
1289 if ((len + strlen(buf) >= PAGE_SIZE)) 1289 if (len + strlen(buf) >= PAGE_SIZE)
1290 break; 1290 break;
1291 1291
1292 len += sprintf(page+len, "%s", buf); 1292 len += sprintf(page+len, "%s", buf);
@@ -1294,7 +1294,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1294 } 1294 }
1295 spin_unlock(&su_dev->t10_pr.registration_lock); 1295 spin_unlock(&su_dev->t10_pr.registration_lock);
1296 1296
1297 if (!(reg_count)) 1297 if (!reg_count)
1298 len += sprintf(page+len, "None\n"); 1298 len += sprintf(page+len, "None\n");
1299 1299
1300 return len; 1300 return len;
@@ -1314,7 +1314,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type(
1314 ssize_t len = 0; 1314 ssize_t len = 0;
1315 1315
1316 dev = su_dev->se_dev_ptr; 1316 dev = su_dev->se_dev_ptr;
1317 if (!(dev)) 1317 if (!dev)
1318 return -ENODEV; 1318 return -ENODEV;
1319 1319
1320 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1320 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1322,7 +1322,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type(
1322 1322
1323 spin_lock(&dev->dev_reservation_lock); 1323 spin_lock(&dev->dev_reservation_lock);
1324 pr_reg = dev->dev_pr_res_holder; 1324 pr_reg = dev->dev_pr_res_holder;
1325 if (!(pr_reg)) { 1325 if (!pr_reg) {
1326 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1326 len = sprintf(page, "No SPC-3 Reservation holder\n");
1327 spin_unlock(&dev->dev_reservation_lock); 1327 spin_unlock(&dev->dev_reservation_lock);
1328 return len; 1328 return len;
@@ -1345,7 +1345,7 @@ static ssize_t target_core_dev_pr_show_attr_res_type(
1345{ 1345{
1346 ssize_t len = 0; 1346 ssize_t len = 0;
1347 1347
1348 if (!(su_dev->se_dev_ptr)) 1348 if (!su_dev->se_dev_ptr)
1349 return -ENODEV; 1349 return -ENODEV;
1350 1350
1351 switch (su_dev->t10_pr.res_type) { 1351 switch (su_dev->t10_pr.res_type) {
@@ -1376,7 +1376,7 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
1376 struct se_subsystem_dev *su_dev, 1376 struct se_subsystem_dev *su_dev,
1377 char *page) 1377 char *page)
1378{ 1378{
1379 if (!(su_dev->se_dev_ptr)) 1379 if (!su_dev->se_dev_ptr)
1380 return -ENODEV; 1380 return -ENODEV;
1381 1381
1382 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1382 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1395,7 +1395,7 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
1395 struct se_subsystem_dev *su_dev, 1395 struct se_subsystem_dev *su_dev,
1396 char *page) 1396 char *page)
1397{ 1397{
1398 if (!(su_dev->se_dev_ptr)) 1398 if (!su_dev->se_dev_ptr)
1399 return -ENODEV; 1399 return -ENODEV;
1400 1400
1401 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1401 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1447,14 +1447,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1447 u8 type = 0, scope; 1447 u8 type = 0, scope;
1448 1448
1449 dev = su_dev->se_dev_ptr; 1449 dev = su_dev->se_dev_ptr;
1450 if (!(dev)) 1450 if (!dev)
1451 return -ENODEV; 1451 return -ENODEV;
1452 1452
1453 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1453 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1454 return 0; 1454 return 0;
1455 1455
1456 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1456 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1457 printk(KERN_INFO "Unable to process APTPL metadata while" 1457 pr_debug("Unable to process APTPL metadata while"
1458 " active fabric exports exist\n"); 1458 " active fabric exports exist\n");
1459 return -EINVAL; 1459 return -EINVAL;
1460 } 1460 }
@@ -1484,7 +1484,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1484 goto out; 1484 goto out;
1485 } 1485 }
1486 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { 1486 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
1487 printk(KERN_ERR "APTPL metadata initiator_node=" 1487 pr_err("APTPL metadata initiator_node="
1488 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", 1488 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
1489 PR_APTPL_MAX_IPORT_LEN); 1489 PR_APTPL_MAX_IPORT_LEN);
1490 ret = -EINVAL; 1490 ret = -EINVAL;
@@ -1498,7 +1498,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1498 goto out; 1498 goto out;
1499 } 1499 }
1500 if (strlen(isid) >= PR_REG_ISID_LEN) { 1500 if (strlen(isid) >= PR_REG_ISID_LEN) {
1501 printk(KERN_ERR "APTPL metadata initiator_isid" 1501 pr_err("APTPL metadata initiator_isid"
1502 "= exceeds PR_REG_ISID_LEN: %d\n", 1502 "= exceeds PR_REG_ISID_LEN: %d\n",
1503 PR_REG_ISID_LEN); 1503 PR_REG_ISID_LEN);
1504 ret = -EINVAL; 1504 ret = -EINVAL;
@@ -1513,7 +1513,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1513 } 1513 }
1514 ret = strict_strtoull(arg_p, 0, &tmp_ll); 1514 ret = strict_strtoull(arg_p, 0, &tmp_ll);
1515 if (ret < 0) { 1515 if (ret < 0) {
1516 printk(KERN_ERR "strict_strtoull() failed for" 1516 pr_err("strict_strtoull() failed for"
1517 " sa_res_key=\n"); 1517 " sa_res_key=\n");
1518 goto out; 1518 goto out;
1519 } 1519 }
@@ -1559,7 +1559,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1559 goto out; 1559 goto out;
1560 } 1560 }
1561 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { 1561 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
1562 printk(KERN_ERR "APTPL metadata target_node=" 1562 pr_err("APTPL metadata target_node="
1563 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", 1563 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
1564 PR_APTPL_MAX_TPORT_LEN); 1564 PR_APTPL_MAX_TPORT_LEN);
1565 ret = -EINVAL; 1565 ret = -EINVAL;
@@ -1583,14 +1583,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1583 } 1583 }
1584 } 1584 }
1585 1585
1586 if (!(i_port) || !(t_port) || !(sa_res_key)) { 1586 if (!i_port || !t_port || !sa_res_key) {
1587 printk(KERN_ERR "Illegal parameters for APTPL registration\n"); 1587 pr_err("Illegal parameters for APTPL registration\n");
1588 ret = -EINVAL; 1588 ret = -EINVAL;
1589 goto out; 1589 goto out;
1590 } 1590 }
1591 1591
1592 if (res_holder && !(type)) { 1592 if (res_holder && !(type)) {
1593 printk(KERN_ERR "Illegal PR type: 0x%02x for reservation" 1593 pr_err("Illegal PR type: 0x%02x for reservation"
1594 " holder\n", type); 1594 " holder\n", type);
1595 ret = -EINVAL; 1595 ret = -EINVAL;
1596 goto out; 1596 goto out;
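
The res_aptpl_metadata store handler whose tail appears above ingests a key=value token stream (initiator_node=, initiator_isid=, sa_res_key=, target_node=, res_holder=, and friends) and refuses the registration unless the initiator port, target port, and reservation key are all present. The real handler drives a match_token() table; a reduced sketch using plain strsep()/strncmp() for two of the keys, with a hypothetical function name:

	#include <linux/string.h>

	/* caller passes *i_port = NULL and a scratch copy of the input */
	static int example_parse_aptpl(char *opts, char **i_port,
				       unsigned long long *sa_res_key)
	{
		char *ptr;
		int ret;

		while ((ptr = strsep(&opts, ",\n")) != NULL) {
			if (!*ptr)
				continue;
			if (!strncmp(ptr, "initiator_node=", 15))
				*i_port = ptr + 15;
			else if (!strncmp(ptr, "sa_res_key=", 11)) {
				ret = strict_strtoull(ptr + 11, 0, sa_res_key);
				if (ret < 0)
					return ret;
			}
		}
		return *i_port ? 0 : -EINVAL;
	}
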
@@ -1649,7 +1649,7 @@ static ssize_t target_core_show_dev_info(void *p, char *page)
1649 int bl = 0; 1649 int bl = 0;
1650 ssize_t read_bytes = 0; 1650 ssize_t read_bytes = 0;
1651 1651
1652 if (!(se_dev->se_dev_ptr)) 1652 if (!se_dev->se_dev_ptr)
1653 return -ENODEV; 1653 return -ENODEV;
1654 1654
1655 transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); 1655 transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
@@ -1675,8 +1675,8 @@ static ssize_t target_core_store_dev_control(
1675 struct se_hba *hba = se_dev->se_dev_hba; 1675 struct se_hba *hba = se_dev->se_dev_hba;
1676 struct se_subsystem_api *t = hba->transport; 1676 struct se_subsystem_api *t = hba->transport;
1677 1677
1678 if (!(se_dev->se_dev_su_ptr)) { 1678 if (!se_dev->se_dev_su_ptr) {
1679 printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se" 1679 pr_err("Unable to locate struct se_subsystem_dev>se"
1680 "_dev_su_ptr\n"); 1680 "_dev_su_ptr\n");
1681 return -EINVAL; 1681 return -EINVAL;
1682 } 1682 }
@@ -1712,7 +1712,7 @@ static ssize_t target_core_store_dev_alias(
1712 ssize_t read_bytes; 1712 ssize_t read_bytes;
1713 1713
1714 if (count > (SE_DEV_ALIAS_LEN-1)) { 1714 if (count > (SE_DEV_ALIAS_LEN-1)) {
1715 printk(KERN_ERR "alias count: %d exceeds" 1715 pr_err("alias count: %d exceeds"
1716 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, 1716 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
1717 SE_DEV_ALIAS_LEN-1); 1717 SE_DEV_ALIAS_LEN-1);
1718 return -EINVAL; 1718 return -EINVAL;
@@ -1722,7 +1722,7 @@ static ssize_t target_core_store_dev_alias(
1722 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, 1722 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
1723 "%s", page); 1723 "%s", page);
1724 1724
1725 printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n", 1725 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
1726 config_item_name(&hba->hba_group.cg_item), 1726 config_item_name(&hba->hba_group.cg_item),
1727 config_item_name(&se_dev->se_dev_group.cg_item), 1727 config_item_name(&se_dev->se_dev_group.cg_item),
1728 se_dev->se_dev_alias); 1728 se_dev->se_dev_alias);
@@ -1758,7 +1758,7 @@ static ssize_t target_core_store_dev_udev_path(
1758 ssize_t read_bytes; 1758 ssize_t read_bytes;
1759 1759
1760 if (count > (SE_UDEV_PATH_LEN-1)) { 1760 if (count > (SE_UDEV_PATH_LEN-1)) {
1761 printk(KERN_ERR "udev_path count: %d exceeds" 1761 pr_err("udev_path count: %d exceeds"
1762 " SE_UDEV_PATH_LEN-1: %u\n", (int)count, 1762 " SE_UDEV_PATH_LEN-1: %u\n", (int)count,
1763 SE_UDEV_PATH_LEN-1); 1763 SE_UDEV_PATH_LEN-1);
1764 return -EINVAL; 1764 return -EINVAL;
@@ -1768,7 +1768,7 @@ static ssize_t target_core_store_dev_udev_path(
1768 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, 1768 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
1769 "%s", page); 1769 "%s", page);
1770 1770
1771 printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1771 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
1772 config_item_name(&hba->hba_group.cg_item), 1772 config_item_name(&hba->hba_group.cg_item),
1773 config_item_name(&se_dev->se_dev_group.cg_item), 1773 config_item_name(&se_dev->se_dev_group.cg_item),
1774 se_dev->se_dev_udev_path); 1774 se_dev->se_dev_udev_path);
@@ -1796,13 +1796,13 @@ static ssize_t target_core_store_dev_enable(
1796 char *ptr; 1796 char *ptr;
1797 1797
1798 ptr = strstr(page, "1"); 1798 ptr = strstr(page, "1");
1799 if (!(ptr)) { 1799 if (!ptr) {
1800 printk(KERN_ERR "For dev_enable ops, only valid value" 1800 pr_err("For dev_enable ops, only valid value"
1801 " is \"1\"\n"); 1801 " is \"1\"\n");
1802 return -EINVAL; 1802 return -EINVAL;
1803 } 1803 }
1804 if ((se_dev->se_dev_ptr)) { 1804 if (se_dev->se_dev_ptr) {
1805 printk(KERN_ERR "se_dev->se_dev_ptr already set for storage" 1805 pr_err("se_dev->se_dev_ptr already set for storage"
1806 " object\n"); 1806 " object\n");
1807 return -EEXIST; 1807 return -EEXIST;
1808 } 1808 }
@@ -1817,7 +1817,7 @@ static ssize_t target_core_store_dev_enable(
1817 return -EINVAL; 1817 return -EINVAL;
1818 1818
1819 se_dev->se_dev_ptr = dev; 1819 se_dev->se_dev_ptr = dev;
1820 printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" 1820 pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
1821 " %p\n", se_dev->se_dev_ptr); 1821 " %p\n", se_dev->se_dev_ptr);
1822 1822
1823 return count; 1823 return count;
@@ -1841,22 +1841,22 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1841 ssize_t len = 0; 1841 ssize_t len = 0;
1842 1842
1843 dev = su_dev->se_dev_ptr; 1843 dev = su_dev->se_dev_ptr;
1844 if (!(dev)) 1844 if (!dev)
1845 return -ENODEV; 1845 return -ENODEV;
1846 1846
1847 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) 1847 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
1848 return len; 1848 return len;
1849 1849
1850 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1850 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1851 if (!(lu_gp_mem)) { 1851 if (!lu_gp_mem) {
1852 printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" 1852 pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
1853 " pointer\n"); 1853 " pointer\n");
1854 return -EINVAL; 1854 return -EINVAL;
1855 } 1855 }
1856 1856
1857 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1857 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1858 lu_gp = lu_gp_mem->lu_gp; 1858 lu_gp = lu_gp_mem->lu_gp;
1859 if ((lu_gp)) { 1859 if (lu_gp) {
1860 lu_ci = &lu_gp->lu_gp_group.cg_item; 1860 lu_ci = &lu_gp->lu_gp_group.cg_item;
1861 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", 1861 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
1862 config_item_name(lu_ci), lu_gp->lu_gp_id); 1862 config_item_name(lu_ci), lu_gp->lu_gp_id);
@@ -1880,17 +1880,17 @@ static ssize_t target_core_store_alua_lu_gp(
1880 int move = 0; 1880 int move = 0;
1881 1881
1882 dev = su_dev->se_dev_ptr; 1882 dev = su_dev->se_dev_ptr;
1883 if (!(dev)) 1883 if (!dev)
1884 return -ENODEV; 1884 return -ENODEV;
1885 1885
1886 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { 1886 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1887 printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", 1887 pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
1888 config_item_name(&hba->hba_group.cg_item), 1888 config_item_name(&hba->hba_group.cg_item),
1889 config_item_name(&su_dev->se_dev_group.cg_item)); 1889 config_item_name(&su_dev->se_dev_group.cg_item));
1890 return -EINVAL; 1890 return -EINVAL;
1891 } 1891 }
1892 if (count > LU_GROUP_NAME_BUF) { 1892 if (count > LU_GROUP_NAME_BUF) {
1893 printk(KERN_ERR "ALUA LU Group Alias too large!\n"); 1893 pr_err("ALUA LU Group Alias too large!\n");
1894 return -EINVAL; 1894 return -EINVAL;
1895 } 1895 }
1896 memset(buf, 0, LU_GROUP_NAME_BUF); 1896 memset(buf, 0, LU_GROUP_NAME_BUF);
@@ -1906,27 +1906,27 @@ static ssize_t target_core_store_alua_lu_gp(
1906 * core_alua_get_lu_gp_by_name below(). 1906 * core_alua_get_lu_gp_by_name below().
1907 */ 1907 */
1908 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); 1908 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
1909 if (!(lu_gp_new)) 1909 if (!lu_gp_new)
1910 return -ENODEV; 1910 return -ENODEV;
1911 } 1911 }
1912 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1912 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1913 if (!(lu_gp_mem)) { 1913 if (!lu_gp_mem) {
1914 if (lu_gp_new) 1914 if (lu_gp_new)
1915 core_alua_put_lu_gp_from_name(lu_gp_new); 1915 core_alua_put_lu_gp_from_name(lu_gp_new);
1916 printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" 1916 pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
1917 " pointer\n"); 1917 " pointer\n");
1918 return -EINVAL; 1918 return -EINVAL;
1919 } 1919 }
1920 1920
1921 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1921 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1922 lu_gp = lu_gp_mem->lu_gp; 1922 lu_gp = lu_gp_mem->lu_gp;
1923 if ((lu_gp)) { 1923 if (lu_gp) {
1924 /* 1924 /*
1925 * Clearing an existing lu_gp association, and replacing 1925 * Clearing an existing lu_gp association, and replacing
1926 * with NULL 1926 * with NULL
1927 */ 1927 */
1928 if (!(lu_gp_new)) { 1928 if (!lu_gp_new) {
1929 printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s" 1929 pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
1930 " from ALUA LU Group: core/alua/lu_gps/%s, ID:" 1930 " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
1931 " %hu\n", 1931 " %hu\n",
1932 config_item_name(&hba->hba_group.cg_item), 1932 config_item_name(&hba->hba_group.cg_item),
@@ -1951,7 +1951,7 @@ static ssize_t target_core_store_alua_lu_gp(
1951 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); 1951 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
1952 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1952 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1953 1953
1954 printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" 1954 pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
1955 " core/alua/lu_gps/%s, ID: %hu\n", 1955 " core/alua/lu_gps/%s, ID: %hu\n",
1956 (move) ? "Moving" : "Adding", 1956 (move) ? "Moving" : "Adding",
1957 config_item_name(&hba->hba_group.cg_item), 1957 config_item_name(&hba->hba_group.cg_item),
@@ -1995,7 +1995,7 @@ static void target_core_dev_release(struct config_item *item)
1995 *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` 1995 *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
1996 */ 1996 */
1997 if (se_dev->se_dev_ptr) { 1997 if (se_dev->se_dev_ptr) {
1998 printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" 1998 pr_debug("Target_Core_ConfigFS: Calling se_free_"
1999 "virtual_device() for se_dev_ptr: %p\n", 1999 "virtual_device() for se_dev_ptr: %p\n",
2000 se_dev->se_dev_ptr); 2000 se_dev->se_dev_ptr);
2001 2001
@@ -2004,14 +2004,14 @@ static void target_core_dev_release(struct config_item *item)
2004 /* 2004 /*
2005 * Release struct se_subsystem_dev->se_dev_su_ptr.. 2005 * Release struct se_subsystem_dev->se_dev_su_ptr..
2006 */ 2006 */
2007 printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" 2007 pr_debug("Target_Core_ConfigFS: Calling t->free_"
2008 "device() for se_dev_su_ptr: %p\n", 2008 "device() for se_dev_su_ptr: %p\n",
2009 se_dev->se_dev_su_ptr); 2009 se_dev->se_dev_su_ptr);
2010 2010
2011 t->free_device(se_dev->se_dev_su_ptr); 2011 t->free_device(se_dev->se_dev_su_ptr);
2012 } 2012 }
2013 2013
2014 printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" 2014 pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
2015 "_dev_t: %p\n", se_dev); 2015 "_dev_t: %p\n", se_dev);
2016 kfree(se_dev); 2016 kfree(se_dev);
2017 } 2017 }
@@ -2026,7 +2026,7 @@ static ssize_t target_core_dev_show(struct config_item *item,
2026 struct target_core_configfs_attribute *tc_attr = container_of( 2026 struct target_core_configfs_attribute *tc_attr = container_of(
2027 attr, struct target_core_configfs_attribute, attr); 2027 attr, struct target_core_configfs_attribute, attr);
2028 2028
2029 if (!(tc_attr->show)) 2029 if (!tc_attr->show)
2030 return -EINVAL; 2030 return -EINVAL;
2031 2031
2032 return tc_attr->show(se_dev, page); 2032 return tc_attr->show(se_dev, page);
@@ -2042,7 +2042,7 @@ static ssize_t target_core_dev_store(struct config_item *item,
2042 struct target_core_configfs_attribute *tc_attr = container_of( 2042 struct target_core_configfs_attribute *tc_attr = container_of(
2043 attr, struct target_core_configfs_attribute, attr); 2043 attr, struct target_core_configfs_attribute, attr);
2044 2044
2045 if (!(tc_attr->store)) 2045 if (!tc_attr->store)
2046 return -EINVAL; 2046 return -EINVAL;
2047 2047
2048 return tc_attr->store(se_dev, page, count); 2048 return tc_attr->store(se_dev, page, count);
@@ -2085,7 +2085,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
2085 struct t10_alua_lu_gp *lu_gp, 2085 struct t10_alua_lu_gp *lu_gp,
2086 char *page) 2086 char *page)
2087 { 2087 {
2088 if (!(lu_gp->lu_gp_valid_id)) 2088 if (!lu_gp->lu_gp_valid_id)
2089 return 0; 2089 return 0;
2090 2090
2091 return sprintf(page, "%hu\n", lu_gp->lu_gp_id); 2091 return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
@@ -2102,12 +2102,12 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
2102 2102
2103 ret = strict_strtoul(page, 0, &lu_gp_id); 2103 ret = strict_strtoul(page, 0, &lu_gp_id);
2104 if (ret < 0) { 2104 if (ret < 0) {
2105 printk(KERN_ERR "strict_strtoul() returned %d for" 2105 pr_err("strict_strtoul() returned %d for"
2106 " lu_gp_id\n", ret); 2106 " lu_gp_id\n", ret);
2107 return -EINVAL; 2107 return -EINVAL;
2108 } 2108 }
2109 if (lu_gp_id > 0x0000ffff) { 2109 if (lu_gp_id > 0x0000ffff) {
2110 printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:" 2110 pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
2111 " 0x0000ffff\n", lu_gp_id); 2111 " 0x0000ffff\n", lu_gp_id);
2112 return -EINVAL; 2112 return -EINVAL;
2113 } 2113 }
@@ -2116,7 +2116,7 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
2116 if (ret < 0) 2116 if (ret < 0)
2117 return -EINVAL; 2117 return -EINVAL;
2118 2118
2119 printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit" 2119 pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
2120 " Group: core/alua/lu_gps/%s to ID: %hu\n", 2120 " Group: core/alua/lu_gps/%s to ID: %hu\n",
2121 config_item_name(&alua_lu_gp_cg->cg_item), 2121 config_item_name(&alua_lu_gp_cg->cg_item),
2122 lu_gp->lu_gp_id); 2122 lu_gp->lu_gp_id);
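
Parsing lu_gp_id shows the two-step numeric store used throughout this file: strict_strtoul() (the pre-kstrtoul interface of this era) followed by an explicit range check before narrowing to the 16-bit field. Sketch, names hypothetical:

    #include <linux/errno.h>
    #include <linux/kernel.h>

    static int demo_parse_gp_id(const char *page, u16 *out)
    {
        unsigned long id;
        int ret;

        ret = strict_strtoul(page, 0, &id);     /* base 0 accepts decimal and 0x... */
        if (ret < 0)
            return -EINVAL;
        if (id > 0x0000ffff)                    /* must fit the u16 group ID */
            return -EINVAL;

        *out = (u16)id;
        return 0;
    }
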
@@ -2154,7 +2154,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
2154 cur_len++; /* Extra byte for NULL terminator */ 2154 cur_len++; /* Extra byte for NULL terminator */
2155 2155
2156 if ((cur_len + len) > PAGE_SIZE) { 2156 if ((cur_len + len) > PAGE_SIZE) {
2157 printk(KERN_WARNING "Ran out of lu_gp_show_attr" 2157 pr_warn("Ran out of lu_gp_show_attr"
2158 "_members buffer\n"); 2158 "_members buffer\n");
2159 break; 2159 break;
2160 } 2160 }
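
The members show routine fills the single page configfs hands it and computes each line's length before writing, so a truncated entry never lands in the buffer. A sketch with hypothetical demo_ types:

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/mm.h>       /* PAGE_SIZE */
    #include <linux/printk.h>

    struct demo_member {
        char name[64];
        struct list_head link;
    };

    static ssize_t demo_show_members(struct list_head *members, char *page)
    {
        struct demo_member *m;
        ssize_t len = 0;
        size_t cur_len;

        list_for_each_entry(m, members, link) {
            cur_len = strlen(m->name) + 2;      /* newline plus NUL terminator */
            if ((cur_len + len) > PAGE_SIZE) {
                pr_warn("Ran out of members buffer\n");
                break;
            }
            /* writes exactly the name and '\n'; NUL stays within cur_len */
            len += snprintf(page + len, cur_len, "%s\n", m->name);
        }
        return len;
    }
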
@@ -2218,7 +2218,7 @@ static struct config_group *target_core_alua_create_lu_gp(
2218 config_group_init_type_name(alua_lu_gp_cg, name, 2218 config_group_init_type_name(alua_lu_gp_cg, name,
2219 &target_core_alua_lu_gp_cit); 2219 &target_core_alua_lu_gp_cit);
2220 2220
2221 printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit" 2221 pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
2222 " Group: core/alua/lu_gps/%s\n", 2222 " Group: core/alua/lu_gps/%s\n",
2223 config_item_name(alua_lu_gp_ci)); 2223 config_item_name(alua_lu_gp_ci));
2224 2224
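
These create/drop callbacks are mirror images: config_group_init_type_name() publishes the new group, and the drop side (next hunk) recovers the containing structure from the generic config_item with container_of() before dropping the last reference. Sketch of the drop side, demo_ names hypothetical:

    #include <linux/configfs.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    struct demo_gp {
        struct config_group group;
        u16 id;
    };

    static void demo_drop_gp(struct config_group *parent, struct config_item *item)
    {
        struct demo_gp *gp = container_of(to_config_group(item),
                                          struct demo_gp, group);

        pr_debug("Releasing %s, ID: %hu\n", config_item_name(item), gp->id);
        config_item_put(item);  /* final put invokes the item's ->release() */
    }
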
@@ -2233,7 +2233,7 @@ static void target_core_alua_drop_lu_gp(
2233 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), 2233 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2234 struct t10_alua_lu_gp, lu_gp_group); 2234 struct t10_alua_lu_gp, lu_gp_group);
2235 2235
2236 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2236 pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
2237 " Group: core/alua/lu_gps/%s, ID: %hu\n", 2237 " Group: core/alua/lu_gps/%s, ID: %hu\n",
2238 config_item_name(item), lu_gp->lu_gp_id); 2238 config_item_name(item), lu_gp->lu_gp_id);
2239 /* 2239 /*
@@ -2292,22 +2292,22 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2292 unsigned long tmp; 2292 unsigned long tmp;
2293 int new_state, ret; 2293 int new_state, ret;
2294 2294
2295 if (!(tg_pt_gp->tg_pt_gp_valid_id)) { 2295 if (!tg_pt_gp->tg_pt_gp_valid_id) {
2296 printk(KERN_ERR "Unable to do implict ALUA on non valid" 2296 pr_err("Unable to do implict ALUA on non valid"
2297 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); 2297 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
2298 return -EINVAL; 2298 return -EINVAL;
2299 } 2299 }
2300 2300
2301 ret = strict_strtoul(page, 0, &tmp); 2301 ret = strict_strtoul(page, 0, &tmp);
2302 if (ret < 0) { 2302 if (ret < 0) {
2303 printk("Unable to extract new ALUA access state from" 2303 pr_err("Unable to extract new ALUA access state from"
2304 " %s\n", page); 2304 " %s\n", page);
2305 return -EINVAL; 2305 return -EINVAL;
2306 } 2306 }
2307 new_state = (int)tmp; 2307 new_state = (int)tmp;
2308 2308
2309 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { 2309 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
2310 printk(KERN_ERR "Unable to process implict configfs ALUA" 2310 pr_err("Unable to process implict configfs ALUA"
2311 " transition while TPGS_IMPLICT_ALUA is diabled\n"); 2311 " transition while TPGS_IMPLICT_ALUA is diabled\n");
2312 return -EINVAL; 2312 return -EINVAL;
2313 } 2313 }
@@ -2338,8 +2338,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2338 unsigned long tmp; 2338 unsigned long tmp;
2339 int new_status, ret; 2339 int new_status, ret;
2340 2340
2341 if (!(tg_pt_gp->tg_pt_gp_valid_id)) { 2341 if (!tg_pt_gp->tg_pt_gp_valid_id) {
2342 printk(KERN_ERR "Unable to do set ALUA access status on non" 2342 pr_err("Unable to do set ALUA access status on non"
2343 " valid tg_pt_gp ID: %hu\n", 2343 " valid tg_pt_gp ID: %hu\n",
2344 tg_pt_gp->tg_pt_gp_valid_id); 2344 tg_pt_gp->tg_pt_gp_valid_id);
2345 return -EINVAL; 2345 return -EINVAL;
@@ -2347,7 +2347,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2347 2347
2348 ret = strict_strtoul(page, 0, &tmp); 2348 ret = strict_strtoul(page, 0, &tmp);
2349 if (ret < 0) { 2349 if (ret < 0) {
2350 printk(KERN_ERR "Unable to extract new ALUA access status" 2350 pr_err("Unable to extract new ALUA access status"
2351 " from %s\n", page); 2351 " from %s\n", page);
2352 return -EINVAL; 2352 return -EINVAL;
2353 } 2353 }
@@ -2356,7 +2356,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2356 if ((new_status != ALUA_STATUS_NONE) && 2356 if ((new_status != ALUA_STATUS_NONE) &&
2357 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 2357 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
2358 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 2358 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
2359 printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n", 2359 pr_err("Illegal ALUA access status: 0x%02x\n",
2360 new_status); 2360 new_status);
2361 return -EINVAL; 2361 return -EINVAL;
2362 } 2362 }
@@ -2407,12 +2407,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
2407 2407
2408 ret = strict_strtoul(page, 0, &tmp); 2408 ret = strict_strtoul(page, 0, &tmp);
2409 if (ret < 0) { 2409 if (ret < 0) {
2410 printk(KERN_ERR "Unable to extract alua_write_metadata\n"); 2410 pr_err("Unable to extract alua_write_metadata\n");
2411 return -EINVAL; 2411 return -EINVAL;
2412 } 2412 }
2413 2413
2414 if ((tmp != 0) && (tmp != 1)) { 2414 if ((tmp != 0) && (tmp != 1)) {
2415 printk(KERN_ERR "Illegal value for alua_write_metadata:" 2415 pr_err("Illegal value for alua_write_metadata:"
2416 " %lu\n", tmp); 2416 " %lu\n", tmp);
2417 return -EINVAL; 2417 return -EINVAL;
2418 } 2418 }
@@ -2494,7 +2494,7 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
2494 struct t10_alua_tg_pt_gp *tg_pt_gp, 2494 struct t10_alua_tg_pt_gp *tg_pt_gp,
2495 char *page) 2495 char *page)
2496 { 2496 {
2497 if (!(tg_pt_gp->tg_pt_gp_valid_id)) 2497 if (!tg_pt_gp->tg_pt_gp_valid_id)
2498 return 0; 2498 return 0;
2499 2499
2500 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); 2500 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
@@ -2511,12 +2511,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
2511 2511
2512 ret = strict_strtoul(page, 0, &tg_pt_gp_id); 2512 ret = strict_strtoul(page, 0, &tg_pt_gp_id);
2513 if (ret < 0) { 2513 if (ret < 0) {
2514 printk(KERN_ERR "strict_strtoul() returned %d for" 2514 pr_err("strict_strtoul() returned %d for"
2515 " tg_pt_gp_id\n", ret); 2515 " tg_pt_gp_id\n", ret);
2516 return -EINVAL; 2516 return -EINVAL;
2517 } 2517 }
2518 if (tg_pt_gp_id > 0x0000ffff) { 2518 if (tg_pt_gp_id > 0x0000ffff) {
2519 printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:" 2519 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
2520 " 0x0000ffff\n", tg_pt_gp_id); 2520 " 0x0000ffff\n", tg_pt_gp_id);
2521 return -EINVAL; 2521 return -EINVAL;
2522 } 2522 }
@@ -2525,7 +2525,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
2525 if (ret < 0) 2525 if (ret < 0)
2526 return -EINVAL; 2526 return -EINVAL;
2527 2527
2528 printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: " 2528 pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
2529 "core/alua/tg_pt_gps/%s to ID: %hu\n", 2529 "core/alua/tg_pt_gps/%s to ID: %hu\n",
2530 config_item_name(&alua_tg_pt_gp_cg->cg_item), 2530 config_item_name(&alua_tg_pt_gp_cg->cg_item),
2531 tg_pt_gp->tg_pt_gp_id); 2531 tg_pt_gp->tg_pt_gp_id);
@@ -2566,7 +2566,7 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
2566 cur_len++; /* Extra byte for NULL terminator */ 2566 cur_len++; /* Extra byte for NULL terminator */
2567 2567
2568 if ((cur_len + len) > PAGE_SIZE) { 2568 if ((cur_len + len) > PAGE_SIZE) {
2569 printk(KERN_WARNING "Ran out of lu_gp_show_attr" 2569 pr_warn("Ran out of lu_gp_show_attr"
2570 "_members buffer\n"); 2570 "_members buffer\n");
2571 break; 2571 break;
2572 } 2572 }
@@ -2632,7 +2632,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
2632 struct config_item *alua_tg_pt_gp_ci = NULL; 2632 struct config_item *alua_tg_pt_gp_ci = NULL;
2633 2633
2634 tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); 2634 tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
2635 if (!(tg_pt_gp)) 2635 if (!tg_pt_gp)
2636 return NULL; 2636 return NULL;
2637 2637
2638 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; 2638 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
@@ -2641,7 +2641,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
2641 config_group_init_type_name(alua_tg_pt_gp_cg, name, 2641 config_group_init_type_name(alua_tg_pt_gp_cg, name,
2642 &target_core_alua_tg_pt_gp_cit); 2642 &target_core_alua_tg_pt_gp_cit);
2643 2643
2644 printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port" 2644 pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
2645 " Group: alua/tg_pt_gps/%s\n", 2645 " Group: alua/tg_pt_gps/%s\n",
2646 config_item_name(alua_tg_pt_gp_ci)); 2646 config_item_name(alua_tg_pt_gp_ci));
2647 2647
@@ -2655,7 +2655,7 @@ static void target_core_alua_drop_tg_pt_gp(
2655 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), 2655 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2656 struct t10_alua_tg_pt_gp, tg_pt_gp_group); 2656 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2657 2657
2658 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" 2658 pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
2659 " Group: alua/tg_pt_gps/%s, ID: %hu\n", 2659 " Group: alua/tg_pt_gps/%s, ID: %hu\n",
2660 config_item_name(item), tg_pt_gp->tg_pt_gp_id); 2660 config_item_name(item), tg_pt_gp->tg_pt_gp_id);
2661 /* 2661 /*
@@ -2746,7 +2746,7 @@ static struct config_group *target_core_make_subdev(
2746 2746
2747 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 2747 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
2748 if (!se_dev) { 2748 if (!se_dev) {
2749 printk(KERN_ERR "Unable to allocate memory for" 2749 pr_err("Unable to allocate memory for"
2750 " struct se_subsystem_dev\n"); 2750 " struct se_subsystem_dev\n");
2751 goto unlock; 2751 goto unlock;
2752 } 2752 }
@@ -2770,7 +2770,7 @@ static struct config_group *target_core_make_subdev(
2770 2770
2771 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, 2771 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
2772 GFP_KERNEL); 2772 GFP_KERNEL);
2773 if (!(dev_cg->default_groups)) 2773 if (!dev_cg->default_groups)
2774 goto out; 2774 goto out;
2775 /* 2775 /*
2776 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr 2776 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
@@ -2781,8 +2781,8 @@ static struct config_group *target_core_make_subdev(
2781 * configfs tree for device object's struct config_group. 2781 * configfs tree for device object's struct config_group.
2782 */ 2782 */
2783 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); 2783 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
2784 if (!(se_dev->se_dev_su_ptr)) { 2784 if (!se_dev->se_dev_su_ptr) {
2785 printk(KERN_ERR "Unable to locate subsystem dependent pointer" 2785 pr_err("Unable to locate subsystem dependent pointer"
2786 " from allocate_virtdevice()\n"); 2786 " from allocate_virtdevice()\n");
2787 goto out; 2787 goto out;
2788 } 2788 }
@@ -2813,14 +2813,14 @@ static struct config_group *target_core_make_subdev(
2813 * Add core/$HBA/$DEV/alua/default_tg_pt_gp 2813 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
2814 */ 2814 */
2815 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); 2815 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
2816 if (!(tg_pt_gp)) 2816 if (!tg_pt_gp)
2817 goto out; 2817 goto out;
2818 2818
2819 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2819 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
2820 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 2820 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
2821 GFP_KERNEL); 2821 GFP_KERNEL);
2822 if (!(tg_pt_gp_cg->default_groups)) { 2822 if (!tg_pt_gp_cg->default_groups) {
2823 printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->" 2823 pr_err("Unable to allocate tg_pt_gp_cg->"
2824 "default_groups\n"); 2824 "default_groups\n");
2825 goto out; 2825 goto out;
2826 } 2826 }
@@ -2837,12 +2837,12 @@ static struct config_group *target_core_make_subdev(
2837 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, 2837 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
2838 GFP_KERNEL); 2838 GFP_KERNEL);
2839 if (!dev_stat_grp->default_groups) { 2839 if (!dev_stat_grp->default_groups) {
2840 printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n"); 2840 pr_err("Unable to allocate dev_stat_grp->default_groups\n");
2841 goto out; 2841 goto out;
2842 } 2842 }
2843 target_stat_setup_dev_default_groups(se_dev); 2843 target_stat_setup_dev_default_groups(se_dev);
2844 2844
2845 printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" 2845 pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
2846 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); 2846 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
2847 2847
2848 mutex_unlock(&hba->hba_access_mutex); 2848 mutex_unlock(&hba->hba_access_mutex);
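
target_core_make_subdev() chains its allocations with the kernel's goto-unwind style: every failure path jumps to cleanup so each resource is freed exactly once and the HBA mutex is always dropped. Reduced sketch, demo_ names hypothetical:

    #include <linux/printk.h>
    #include <linux/slab.h>

    struct demo_subdev {
        void **default_groups;
    };

    static struct demo_subdev *demo_make_subdev(void)
    {
        struct demo_subdev *sd;

        sd = kzalloc(sizeof(*sd), GFP_KERNEL);
        if (!sd) {
            pr_err("Unable to allocate struct demo_subdev\n");
            return NULL;
        }

        sd->default_groups = kzalloc(sizeof(void *) * 7, GFP_KERNEL);
        if (!sd->default_groups)
            goto out_free_sd;   /* unwind in reverse allocation order */

        return sd;

    out_free_sd:
        kfree(sd);
        return NULL;
    }
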
@@ -2975,13 +2975,13 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
2975 2975
2976 ret = strict_strtoul(page, 0, &mode_flag); 2976 ret = strict_strtoul(page, 0, &mode_flag);
2977 if (ret < 0) { 2977 if (ret < 0) {
2978 printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret); 2978 pr_err("Unable to extract hba mode flag: %d\n", ret);
2979 return -EINVAL; 2979 return -EINVAL;
2980 } 2980 }
2981 2981
2982 spin_lock(&hba->device_lock); 2982 spin_lock(&hba->device_lock);
2983 if (!(list_empty(&hba->hba_dev_list))) { 2983 if (!list_empty(&hba->hba_dev_list)) {
2984 printk(KERN_ERR "Unable to set hba_mode with active devices\n"); 2984 pr_err("Unable to set hba_mode with active devices\n");
2985 spin_unlock(&hba->device_lock); 2985 spin_unlock(&hba->device_lock);
2986 return -EINVAL; 2986 return -EINVAL;
2987 } 2987 }
@@ -3040,7 +3040,7 @@ static struct config_group *target_core_call_addhbatotarget(
3040 3040
3041 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); 3041 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
3042 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { 3042 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
3043 printk(KERN_ERR "Passed *name strlen(): %d exceeds" 3043 pr_err("Passed *name strlen(): %d exceeds"
3044 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), 3044 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
3045 TARGET_CORE_NAME_MAX_LEN); 3045 TARGET_CORE_NAME_MAX_LEN);
3046 return ERR_PTR(-ENAMETOOLONG); 3046 return ERR_PTR(-ENAMETOOLONG);
@@ -3048,8 +3048,8 @@ static struct config_group *target_core_call_addhbatotarget(
3048 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); 3048 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
3049 3049
3050 str = strstr(buf, "_"); 3050 str = strstr(buf, "_");
3051 if (!(str)) { 3051 if (!str) {
3052 printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); 3052 pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
3053 return ERR_PTR(-EINVAL); 3053 return ERR_PTR(-EINVAL);
3054 } 3054 }
3055 se_plugin_str = buf; 3055 se_plugin_str = buf;
@@ -3058,7 +3058,7 @@ static struct config_group *target_core_call_addhbatotarget(
3058 * Namely rd_direct and rd_mcp.. 3058 * Namely rd_direct and rd_mcp..
3059 */ 3059 */
3060 str2 = strstr(str+1, "_"); 3060 str2 = strstr(str+1, "_");
3061 if ((str2)) { 3061 if (str2) {
3062 *str2 = '\0'; /* Terminate for *se_plugin_str */ 3062 *str2 = '\0'; /* Terminate for *se_plugin_str */
3063 str2++; /* Skip to start of plugin dependent ID */ 3063 str2++; /* Skip to start of plugin dependent ID */
3064 str = str2; 3064 str = str2;
@@ -3069,7 +3069,7 @@ static struct config_group *target_core_call_addhbatotarget(
3069 3069
3070 ret = strict_strtoul(str, 0, &plugin_dep_id); 3070 ret = strict_strtoul(str, 0, &plugin_dep_id);
3071 if (ret < 0) { 3071 if (ret < 0) {
3072 printk(KERN_ERR "strict_strtoul() returned %d for" 3072 pr_err("strict_strtoul() returned %d for"
3073 " plugin_dep_id\n", ret); 3073 " plugin_dep_id\n", ret);
3074 return ERR_PTR(-EINVAL); 3074 return ERR_PTR(-EINVAL);
3075 } 3075 }
@@ -3122,7 +3122,7 @@ static int __init target_core_init_configfs(void)
3122 struct t10_alua_lu_gp *lu_gp; 3122 struct t10_alua_lu_gp *lu_gp;
3123 int ret; 3123 int ret;
3124 3124
3125 printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage" 3125 pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
3126 " Engine: %s on %s/%s on "UTS_RELEASE"\n", 3126 " Engine: %s on %s/%s on "UTS_RELEASE"\n",
3127 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); 3127 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
3128 3128
@@ -3142,8 +3142,8 @@ static int __init target_core_init_configfs(void)
3142 target_cg = &subsys->su_group; 3142 target_cg = &subsys->su_group;
3143 target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 3143 target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3144 GFP_KERNEL); 3144 GFP_KERNEL);
3145 if (!(target_cg->default_groups)) { 3145 if (!target_cg->default_groups) {
3146 printk(KERN_ERR "Unable to allocate target_cg->default_groups\n"); 3146 pr_err("Unable to allocate target_cg->default_groups\n");
3147 goto out_global; 3147 goto out_global;
3148 } 3148 }
3149 3149
@@ -3157,8 +3157,8 @@ static int __init target_core_init_configfs(void)
3157 hba_cg = &target_core_hbagroup; 3157 hba_cg = &target_core_hbagroup;
3158 hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 3158 hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3159 GFP_KERNEL); 3159 GFP_KERNEL);
3160 if (!(hba_cg->default_groups)) { 3160 if (!hba_cg->default_groups) {
3161 printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); 3161 pr_err("Unable to allocate hba_cg->default_groups\n");
3162 goto out_global; 3162 goto out_global;
3163 } 3163 }
3164 config_group_init_type_name(&alua_group, 3164 config_group_init_type_name(&alua_group,
@@ -3172,8 +3172,8 @@ static int __init target_core_init_configfs(void)
3172 alua_cg = &alua_group; 3172 alua_cg = &alua_group;
3173 alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 3173 alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3174 GFP_KERNEL); 3174 GFP_KERNEL);
3175 if (!(alua_cg->default_groups)) { 3175 if (!alua_cg->default_groups) {
3176 printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n"); 3176 pr_err("Unable to allocate alua_cg->default_groups\n");
3177 goto out_global; 3177 goto out_global;
3178 } 3178 }
3179 3179
@@ -3191,8 +3191,8 @@ static int __init target_core_init_configfs(void)
3191 lu_gp_cg = &alua_lu_gps_group; 3191 lu_gp_cg = &alua_lu_gps_group;
3192 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 3192 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3193 GFP_KERNEL); 3193 GFP_KERNEL);
3194 if (!(lu_gp_cg->default_groups)) { 3194 if (!lu_gp_cg->default_groups) {
3195 printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n"); 3195 pr_err("Unable to allocate lu_gp_cg->default_groups\n");
3196 goto out_global; 3196 goto out_global;
3197 } 3197 }
3198 3198
@@ -3206,11 +3206,11 @@ static int __init target_core_init_configfs(void)
3206 */ 3206 */
3207 ret = configfs_register_subsystem(subsys); 3207 ret = configfs_register_subsystem(subsys);
3208 if (ret < 0) { 3208 if (ret < 0) {
3209 printk(KERN_ERR "Error %d while registering subsystem %s\n", 3209 pr_err("Error %d while registering subsystem %s\n",
3210 ret, subsys->su_group.cg_item.ci_namebuf); 3210 ret, subsys->su_group.cg_item.ci_namebuf);
3211 goto out_global; 3211 goto out_global;
3212 } 3212 }
3213 printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric" 3213 pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
3214 " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" 3214 " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
3215 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); 3215 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
3216 /* 3216 /*
@@ -3290,7 +3290,7 @@ static void __exit target_core_exit_configfs(void)
3290 core_alua_free_lu_gp(default_lu_gp); 3290 core_alua_free_lu_gp(default_lu_gp);
3291 default_lu_gp = NULL; 3291 default_lu_gp = NULL;
3292 3292
3293 printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" 3293 pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
3294 " Infrastructure\n"); 3294 " Infrastructure\n");
3295 3295
3296 core_dev_release_virtual_lun0(); 3296 core_dev_release_virtual_lun0();
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 1185c3b76d47..81860ddc7cc4 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -84,7 +84,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
84 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { 84 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
85 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; 85 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
86 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 86 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
87 printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" 87 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
88 " Access for 0x%08x\n", 88 " Access for 0x%08x\n",
89 se_cmd->se_tfo->get_fabric_name(), 89 se_cmd->se_tfo->get_fabric_name(),
90 unpacked_lun); 90 unpacked_lun);
@@ -117,7 +117,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
117 if (unpacked_lun != 0) { 117 if (unpacked_lun != 0) {
118 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 118 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
119 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 119 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
120 printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 120 pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
121 " Access for 0x%08x\n", 121 " Access for 0x%08x\n",
122 se_cmd->se_tfo->get_fabric_name(), 122 se_cmd->se_tfo->get_fabric_name(),
123 unpacked_lun); 123 unpacked_lun);
@@ -204,7 +204,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
204 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 204 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
205 205
206 if (!se_lun) { 206 if (!se_lun) {
207 printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 207 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
208 " Access for 0x%08x\n", 208 " Access for 0x%08x\n",
209 se_cmd->se_tfo->get_fabric_name(), 209 se_cmd->se_tfo->get_fabric_name(),
210 unpacked_lun); 210 unpacked_lun);
@@ -255,15 +255,15 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
255 continue; 255 continue;
256 256
257 lun = deve->se_lun; 257 lun = deve->se_lun;
258 if (!(lun)) { 258 if (!lun) {
259 printk(KERN_ERR "%s device entries device pointer is" 259 pr_err("%s device entries device pointer is"
260 " NULL, but Initiator has access.\n", 260 " NULL, but Initiator has access.\n",
261 tpg->se_tpg_tfo->get_fabric_name()); 261 tpg->se_tpg_tfo->get_fabric_name());
262 continue; 262 continue;
263 } 263 }
264 port = lun->lun_sep; 264 port = lun->lun_sep;
265 if (!(port)) { 265 if (!port) {
266 printk(KERN_ERR "%s device entries device pointer is" 266 pr_err("%s device entries device pointer is"
267 " NULL, but Initiator has access.\n", 267 " NULL, but Initiator has access.\n",
268 tpg->se_tpg_tfo->get_fabric_name()); 268 tpg->se_tpg_tfo->get_fabric_name());
269 continue; 269 continue;
@@ -301,7 +301,7 @@ int core_free_device_list_for_node(
301 continue; 301 continue;
302 302
303 if (!deve->se_lun) { 303 if (!deve->se_lun) {
304 printk(KERN_ERR "%s device entries device pointer is" 304 pr_err("%s device entries device pointer is"
305 " NULL, but Initiator has access.\n", 305 " NULL, but Initiator has access.\n",
306 tpg->se_tpg_tfo->get_fabric_name()); 306 tpg->se_tpg_tfo->get_fabric_name());
307 continue; 307 continue;
@@ -372,7 +372,7 @@ int core_update_device_list_for_node(
372 * struct se_dev_entry pointers below as logic in 372 * struct se_dev_entry pointers below as logic in
373 * core_alua_do_transition_tg_pt() depends on these being present. 373 * core_alua_do_transition_tg_pt() depends on these being present.
374 */ 374 */
375 if (!(enable)) { 375 if (!enable) {
376 /* 376 /*
377 * deve->se_lun_acl will be NULL for demo-mode created LUNs 377 * deve->se_lun_acl will be NULL for demo-mode created LUNs
378 * that have not been explicitly concerted to MappedLUNs -> 378 * that have not been explicitly concerted to MappedLUNs ->
@@ -395,14 +395,14 @@ int core_update_device_list_for_node(
395 */ 395 */
396 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 396 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
397 if (deve->se_lun_acl != NULL) { 397 if (deve->se_lun_acl != NULL) {
398 printk(KERN_ERR "struct se_dev_entry->se_lun_acl" 398 pr_err("struct se_dev_entry->se_lun_acl"
399 " already set for demo mode -> explict" 399 " already set for demo mode -> explict"
400 " LUN ACL transition\n"); 400 " LUN ACL transition\n");
401 spin_unlock_irq(&nacl->device_list_lock); 401 spin_unlock_irq(&nacl->device_list_lock);
402 return -EINVAL; 402 return -EINVAL;
403 } 403 }
404 if (deve->se_lun != lun) { 404 if (deve->se_lun != lun) {
405 printk(KERN_ERR "struct se_dev_entry->se_lun does" 405 pr_err("struct se_dev_entry->se_lun does"
406 " match passed struct se_lun for demo mode" 406 " match passed struct se_lun for demo mode"
407 " -> explict LUN ACL transition\n"); 407 " -> explict LUN ACL transition\n");
408 spin_unlock_irq(&nacl->device_list_lock); 408 spin_unlock_irq(&nacl->device_list_lock);
@@ -501,8 +501,8 @@ static struct se_port *core_alloc_port(struct se_device *dev)
501 struct se_port *port, *port_tmp; 501 struct se_port *port, *port_tmp;
502 502
503 port = kzalloc(sizeof(struct se_port), GFP_KERNEL); 503 port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
504 if (!(port)) { 504 if (!port) {
505 printk(KERN_ERR "Unable to allocate struct se_port\n"); 505 pr_err("Unable to allocate struct se_port\n");
506 return ERR_PTR(-ENOMEM); 506 return ERR_PTR(-ENOMEM);
507 } 507 }
508 INIT_LIST_HEAD(&port->sep_alua_list); 508 INIT_LIST_HEAD(&port->sep_alua_list);
@@ -513,7 +513,7 @@ static struct se_port *core_alloc_port(struct se_device *dev)
513 513
514 spin_lock(&dev->se_port_lock); 514 spin_lock(&dev->se_port_lock);
515 if (dev->dev_port_count == 0x0000ffff) { 515 if (dev->dev_port_count == 0x0000ffff) {
516 printk(KERN_WARNING "Reached dev->dev_port_count ==" 516 pr_warn("Reached dev->dev_port_count =="
517 " 0x0000ffff\n"); 517 " 0x0000ffff\n");
518 spin_unlock(&dev->se_port_lock); 518 spin_unlock(&dev->se_port_lock);
519 return ERR_PTR(-ENOSPC); 519 return ERR_PTR(-ENOSPC);
@@ -532,7 +532,7 @@ again:
532 * 3h to FFFFh Relative port 3 through 65 535 532 * 3h to FFFFh Relative port 3 through 65 535
533 */ 533 */
534 port->sep_rtpi = dev->dev_rpti_counter++; 534 port->sep_rtpi = dev->dev_rpti_counter++;
535 if (!(port->sep_rtpi)) 535 if (!port->sep_rtpi)
536 goto again; 536 goto again;
537 537
538 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { 538 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
@@ -570,7 +570,7 @@ static void core_export_port(
570 if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 570 if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
571 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 571 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
572 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { 572 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
573 printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" 573 pr_err("Unable to allocate t10_alua_tg_pt"
574 "_gp_member_t\n"); 574 "_gp_member_t\n");
575 return; 575 return;
576 } 576 }
@@ -578,7 +578,7 @@ static void core_export_port(
578 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 578 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
579 su_dev->t10_alua.default_tg_pt_gp); 579 su_dev->t10_alua.default_tg_pt_gp);
580 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 580 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
581 printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" 581 pr_debug("%s/%s: Adding to default ALUA Target Port"
582 " Group: alua/default_tg_pt_gp\n", 582 " Group: alua/default_tg_pt_gp\n",
583 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); 583 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
584 } 584 }
@@ -663,8 +663,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
663 list_for_each_entry(se_task, &se_cmd->t_task_list, t_list) 663 list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
664 break; 664 break;
665 665
666 if (!(se_task)) { 666 if (!se_task) {
667 printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); 667 pr_err("Unable to locate struct se_task for struct se_cmd\n");
668 return PYX_TRANSPORT_LU_COMM_FAILURE; 668 return PYX_TRANSPORT_LU_COMM_FAILURE;
669 } 669 }
670 670
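
The loop above exists only to grab the first task on the list (the break fires on the first pass). list_first_entry() states that intent directly; a sketch with hypothetical demo_ types:

    #include <linux/list.h>

    struct demo_task {
        struct list_head t_list;
    };

    static struct demo_task *demo_first_task(struct list_head *head)
    {
        if (list_empty(head))
            return NULL;        /* caller reports the failure, as above */
        return list_first_entry(head, struct demo_task, t_list);
    }
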
@@ -675,7 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
675 * coming via a target_core_mod PASSTHROUGH op, and not through 675 * coming via a target_core_mod PASSTHROUGH op, and not through
676 * a $FABRIC_MOD. In that case, report LUN=0 only. 676 * a $FABRIC_MOD. In that case, report LUN=0 only.
677 */ 677 */
678 if (!(se_sess)) { 678 if (!se_sess) {
679 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); 679 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
680 lun_count = 1; 680 lun_count = 1;
681 goto done; 681 goto done;
@@ -893,12 +893,12 @@ void se_dev_set_default_attribs(
893 int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) 893 int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
894 { 894 {
895 if (task_timeout > DA_TASK_TIMEOUT_MAX) { 895 if (task_timeout > DA_TASK_TIMEOUT_MAX) {
896 printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" 896 pr_err("dev[%p]: Passed task_timeout: %u larger then"
897 " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); 897 " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
898 return -EINVAL; 898 return -EINVAL;
899 } else { 899 } else {
900 dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; 900 dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
901 printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", 901 pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
902 dev, task_timeout); 902 dev, task_timeout);
903 } 903 }
904 904
@@ -910,7 +910,7 @@ int se_dev_set_max_unmap_lba_count(
910 u32 max_unmap_lba_count) 910 u32 max_unmap_lba_count)
911 { 911 {
912 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; 912 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
913 printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", 913 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
914 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); 914 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
915 return 0; 915 return 0;
916 } 916 }
@@ -921,7 +921,7 @@ int se_dev_set_max_unmap_block_desc_count(
921 { 921 {
922 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 922 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
923 max_unmap_block_desc_count; 923 max_unmap_block_desc_count;
924 printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", 924 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
925 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); 925 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
926 return 0; 926 return 0;
927 } 927 }
@@ -931,7 +931,7 @@ int se_dev_set_unmap_granularity(
931 u32 unmap_granularity) 931 u32 unmap_granularity)
932 { 932 {
933 dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; 933 dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
934 printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", 934 pr_debug("dev[%p]: Set unmap_granularity: %u\n",
935 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); 935 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
936 return 0; 936 return 0;
937 } 937 }
@@ -941,7 +941,7 @@ int se_dev_set_unmap_granularity_alignment(
941 u32 unmap_granularity_alignment) 941 u32 unmap_granularity_alignment)
942 { 942 {
943 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; 943 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
944 printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", 944 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
945 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); 945 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
946 return 0; 946 return 0;
947 } 947 }
@@ -949,19 +949,19 @@ int se_dev_set_unmap_granularity_alignment(
949 int se_dev_set_emulate_dpo(struct se_device *dev, int flag) 949 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
950 { 950 {
951 if ((flag != 0) && (flag != 1)) { 951 if ((flag != 0) && (flag != 1)) {
952 printk(KERN_ERR "Illegal value %d\n", flag); 952 pr_err("Illegal value %d\n", flag);
953 return -EINVAL; 953 return -EINVAL;
954 } 954 }
955 if (dev->transport->dpo_emulated == NULL) { 955 if (dev->transport->dpo_emulated == NULL) {
956 printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n"); 956 pr_err("dev->transport->dpo_emulated is NULL\n");
957 return -EINVAL; 957 return -EINVAL;
958 } 958 }
959 if (dev->transport->dpo_emulated(dev) == 0) { 959 if (dev->transport->dpo_emulated(dev) == 0) {
960 printk(KERN_ERR "dev->transport->dpo_emulated not supported\n"); 960 pr_err("dev->transport->dpo_emulated not supported\n");
961 return -EINVAL; 961 return -EINVAL;
962 } 962 }
963 dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; 963 dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
964 printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" 964 pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
965 " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); 965 " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
966 return 0; 966 return 0;
967 } 967 }
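
Each se_dev_set_emulate_*() setter in this run repeats the same three gates: the flag must be 0 or 1, the transport must supply the hook, and the hook must report support. A hypothetical helper for the first gate shows how little each call site actually varies:

    #include <linux/errno.h>
    #include <linux/printk.h>

    static int demo_check_bool_flag(int flag)
    {
        if (flag != 0 && flag != 1) {
            pr_err("Illegal value %d\n", flag);
            return -EINVAL;
        }
        return 0;
    }

Nothing in this series factors that out; the cleanup here is strictly the printk-to-pr_* conversion and the parenthesis trimming.
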
@@ -969,19 +969,19 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
969 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) 969 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
970 { 970 {
971 if ((flag != 0) && (flag != 1)) { 971 if ((flag != 0) && (flag != 1)) {
972 printk(KERN_ERR "Illegal value %d\n", flag); 972 pr_err("Illegal value %d\n", flag);
973 return -EINVAL; 973 return -EINVAL;
974 } 974 }
975 if (dev->transport->fua_write_emulated == NULL) { 975 if (dev->transport->fua_write_emulated == NULL) {
976 printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n"); 976 pr_err("dev->transport->fua_write_emulated is NULL\n");
977 return -EINVAL; 977 return -EINVAL;
978 } 978 }
979 if (dev->transport->fua_write_emulated(dev) == 0) { 979 if (dev->transport->fua_write_emulated(dev) == 0) {
980 printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n"); 980 pr_err("dev->transport->fua_write_emulated not supported\n");
981 return -EINVAL; 981 return -EINVAL;
982 } 982 }
983 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; 983 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
984 printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 984 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
985 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); 985 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
986 return 0; 986 return 0;
987 } 987 }
@@ -989,19 +989,19 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
989 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) 989 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
990 { 990 {
991 if ((flag != 0) && (flag != 1)) { 991 if ((flag != 0) && (flag != 1)) {
992 printk(KERN_ERR "Illegal value %d\n", flag); 992 pr_err("Illegal value %d\n", flag);
993 return -EINVAL; 993 return -EINVAL;
994 } 994 }
995 if (dev->transport->fua_read_emulated == NULL) { 995 if (dev->transport->fua_read_emulated == NULL) {
996 printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n"); 996 pr_err("dev->transport->fua_read_emulated is NULL\n");
997 return -EINVAL; 997 return -EINVAL;
998 } 998 }
999 if (dev->transport->fua_read_emulated(dev) == 0) { 999 if (dev->transport->fua_read_emulated(dev) == 0) {
1000 printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n"); 1000 pr_err("dev->transport->fua_read_emulated not supported\n");
1001 return -EINVAL; 1001 return -EINVAL;
1002 } 1002 }
1003 dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; 1003 dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
1004 printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", 1004 pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
1005 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); 1005 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
1006 return 0; 1006 return 0;
1007 } 1007 }
@@ -1009,19 +1009,19 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
1009 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) 1009 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1010 { 1010 {
1011 if ((flag != 0) && (flag != 1)) { 1011 if ((flag != 0) && (flag != 1)) {
1012 printk(KERN_ERR "Illegal value %d\n", flag); 1012 pr_err("Illegal value %d\n", flag);
1013 return -EINVAL; 1013 return -EINVAL;
1014 } 1014 }
1015 if (dev->transport->write_cache_emulated == NULL) { 1015 if (dev->transport->write_cache_emulated == NULL) {
1016 printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n"); 1016 pr_err("dev->transport->write_cache_emulated is NULL\n");
1017 return -EINVAL; 1017 return -EINVAL;
1018 } 1018 }
1019 if (dev->transport->write_cache_emulated(dev) == 0) { 1019 if (dev->transport->write_cache_emulated(dev) == 0) {
1020 printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n"); 1020 pr_err("dev->transport->write_cache_emulated not supported\n");
1021 return -EINVAL; 1021 return -EINVAL;
1022 } 1022 }
1023 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; 1023 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
1024 printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 1024 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1025 dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); 1025 dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
1026 return 0; 1026 return 0;
1027 } 1027 }
@@ -1029,19 +1029,19 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1029 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) 1029 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1030 { 1030 {
1031 if ((flag != 0) && (flag != 1) && (flag != 2)) { 1031 if ((flag != 0) && (flag != 1) && (flag != 2)) {
1032 printk(KERN_ERR "Illegal value %d\n", flag); 1032 pr_err("Illegal value %d\n", flag);
1033 return -EINVAL; 1033 return -EINVAL;
1034 } 1034 }
1035 1035
1036 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1036 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1037 printk(KERN_ERR "dev[%p]: Unable to change SE Device" 1037 pr_err("dev[%p]: Unable to change SE Device"
1038 " UA_INTRLCK_CTRL while dev_export_obj: %d count" 1038 " UA_INTRLCK_CTRL while dev_export_obj: %d count"
1039 " exists\n", dev, 1039 " exists\n", dev,
1040 atomic_read(&dev->dev_export_obj.obj_access_count)); 1040 atomic_read(&dev->dev_export_obj.obj_access_count));
1041 return -EINVAL; 1041 return -EINVAL;
1042 } 1042 }
1043 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; 1043 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
1044 printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", 1044 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1045 dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); 1045 dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
1046 1046
1047 return 0; 1047 return 0;
@@ -1050,18 +1050,18 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1050 int se_dev_set_emulate_tas(struct se_device *dev, int flag) 1050 int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1051 { 1051 {
1052 if ((flag != 0) && (flag != 1)) { 1052 if ((flag != 0) && (flag != 1)) {
1053 printk(KERN_ERR "Illegal value %d\n", flag); 1053 pr_err("Illegal value %d\n", flag);
1054 return -EINVAL; 1054 return -EINVAL;
1055 } 1055 }
1056 1056
1057 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1057 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1058 printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" 1058 pr_err("dev[%p]: Unable to change SE Device TAS while"
1059 " dev_export_obj: %d count exists\n", dev, 1059 " dev_export_obj: %d count exists\n", dev,
1060 atomic_read(&dev->dev_export_obj.obj_access_count)); 1060 atomic_read(&dev->dev_export_obj.obj_access_count));
1061 return -EINVAL; 1061 return -EINVAL;
1062 } 1062 }
1063 dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; 1063 dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
1064 printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", 1064 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1065 dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); 1065 dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
1066 1066
1067 return 0; 1067 return 0;
@@ -1070,20 +1070,20 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1070 int se_dev_set_emulate_tpu(struct se_device *dev, int flag) 1070 int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1071 { 1071 {
1072 if ((flag != 0) && (flag != 1)) { 1072 if ((flag != 0) && (flag != 1)) {
1073 printk(KERN_ERR "Illegal value %d\n", flag); 1073 pr_err("Illegal value %d\n", flag);
1074 return -EINVAL; 1074 return -EINVAL;
1075 } 1075 }
1076 /* 1076 /*
1077 * We expect this value to be non-zero when generic Block Layer 1077 * We expect this value to be non-zero when generic Block Layer
1078 * Discard supported is detected iblock_create_virtdevice(). 1078 * Discard supported is detected iblock_create_virtdevice().
1079 */ 1079 */
1080 if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { 1080 if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1081 printk(KERN_ERR "Generic Block Discard not supported\n"); 1081 pr_err("Generic Block Discard not supported\n");
1082 return -ENOSYS; 1082 return -ENOSYS;
1083 } 1083 }
1084 1084
1085 dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; 1085 dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
1086 printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 1086 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1087 dev, flag); 1087 dev, flag);
1088 return 0; 1088 return 0;
1089 } 1089 }
@@ -1091,20 +1091,20 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1091 int se_dev_set_emulate_tpws(struct se_device *dev, int flag) 1091 int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1092 { 1092 {
1093 if ((flag != 0) && (flag != 1)) { 1093 if ((flag != 0) && (flag != 1)) {
1094 printk(KERN_ERR "Illegal value %d\n", flag); 1094 pr_err("Illegal value %d\n", flag);
1095 return -EINVAL; 1095 return -EINVAL;
1096 } 1096 }
1097 /* 1097 /*
1098 * We expect this value to be non-zero when generic Block Layer 1098 * We expect this value to be non-zero when generic Block Layer
1099 * Discard supported is detected iblock_create_virtdevice(). 1099 * Discard supported is detected iblock_create_virtdevice().
1100 */ 1100 */
1101 if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { 1101 if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1102 printk(KERN_ERR "Generic Block Discard not supported\n"); 1102 pr_err("Generic Block Discard not supported\n");
1103 return -ENOSYS; 1103 return -ENOSYS;
1104 } 1104 }
1105 1105
1106 dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; 1106 dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
1107 printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 1107 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1108 dev, flag); 1108 dev, flag);
1109 return 0; 1109 return 0;
1110 } 1110 }
@@ -1112,11 +1112,11 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1112 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1112 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1113 { 1113 {
1114 if ((flag != 0) && (flag != 1)) { 1114 if ((flag != 0) && (flag != 1)) {
1115 printk(KERN_ERR "Illegal value %d\n", flag); 1115 pr_err("Illegal value %d\n", flag);
1116 return -EINVAL; 1116 return -EINVAL;
1117 } 1117 }
1118 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; 1118 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
1119 printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 1119 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1120 (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 1120 (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1121 return 0; 1121 return 0;
1122 } 1122 }
@@ -1141,20 +1141,20 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1141 u32 orig_queue_depth = dev->queue_depth; 1141 u32 orig_queue_depth = dev->queue_depth;
1142 1142
1143 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1143 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1144 printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" 1144 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1145 " dev_export_obj: %d count exists\n", dev, 1145 " dev_export_obj: %d count exists\n", dev,
1146 atomic_read(&dev->dev_export_obj.obj_access_count)); 1146 atomic_read(&dev->dev_export_obj.obj_access_count));
1147 return -EINVAL; 1147 return -EINVAL;
1148 } 1148 }
1149 if (!(queue_depth)) { 1149 if (!queue_depth) {
1150 printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" 1150 pr_err("dev[%p]: Illegal ZERO value for queue"
1151 "_depth\n", dev); 1151 "_depth\n", dev);
1152 return -EINVAL; 1152 return -EINVAL;
1153 } 1153 }
1154 1154
1155 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1155 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1156 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1156 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1157 printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" 1157 pr_err("dev[%p]: Passed queue_depth: %u"
1158 " exceeds TCM/SE_Device TCQ: %u\n", 1158 " exceeds TCM/SE_Device TCQ: %u\n",
1159 dev, queue_depth, 1159 dev, queue_depth,
1160 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1160 dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
@@ -1163,7 +1163,7 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1163 } else { 1163 } else {
1164 if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { 1164 if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
1165 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1165 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1166 printk(KERN_ERR "dev[%p]: Passed queue_depth:" 1166 pr_err("dev[%p]: Passed queue_depth:"
1167 " %u exceeds TCM/SE_Device MAX" 1167 " %u exceeds TCM/SE_Device MAX"
1168 " TCQ: %u\n", dev, queue_depth, 1168 " TCQ: %u\n", dev, queue_depth,
1169 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1169 dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
@@ -1178,7 +1178,7 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1178 else if (queue_depth < orig_queue_depth) 1178 else if (queue_depth < orig_queue_depth)
1179 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); 1179 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
1180 1180
1181 printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", 1181 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1182 dev, queue_depth); 1182 dev, queue_depth);
1183 return 0; 1183 return 0;
1184} 1184}
@@ -1188,41 +1188,41 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1188 int force = 0; /* Force setting for VDEVS */ 1188 int force = 0; /* Force setting for VDEVS */
1189 1189
1190 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1190 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1191 printk(KERN_ERR "dev[%p]: Unable to change SE Device" 1191 pr_err("dev[%p]: Unable to change SE Device"
1192 " max_sectors while dev_export_obj: %d count exists\n", 1192 " max_sectors while dev_export_obj: %d count exists\n",
1193 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1193 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1194 return -EINVAL; 1194 return -EINVAL;
1195 } 1195 }
1196 if (!(max_sectors)) { 1196 if (!max_sectors) {
1197 printk(KERN_ERR "dev[%p]: Illegal ZERO value for" 1197 pr_err("dev[%p]: Illegal ZERO value for"
1198 " max_sectors\n", dev); 1198 " max_sectors\n", dev);
1199 return -EINVAL; 1199 return -EINVAL;
1200 } 1200 }
1201 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { 1201 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1202 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" 1202 pr_err("dev[%p]: Passed max_sectors: %u less than"
1203 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, 1203 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
1204 DA_STATUS_MAX_SECTORS_MIN); 1204 DA_STATUS_MAX_SECTORS_MIN);
1205 return -EINVAL; 1205 return -EINVAL;
1206 } 1206 }
1207 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1207 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1208 if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { 1208 if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
1209 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" 1209 pr_err("dev[%p]: Passed max_sectors: %u"
1210 " greater than TCM/SE_Device max_sectors:" 1210 " greater than TCM/SE_Device max_sectors:"
1211 " %u\n", dev, max_sectors, 1211 " %u\n", dev, max_sectors,
1212 dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1212 dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1213 return -EINVAL; 1213 return -EINVAL;
1214 } 1214 }
1215 } else { 1215 } else {
1216 if (!(force) && (max_sectors > 1216 if (!force && (max_sectors >
1217 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { 1217 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
1218 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" 1218 pr_err("dev[%p]: Passed max_sectors: %u"
1219 " greater than TCM/SE_Device max_sectors" 1219 " greater than TCM/SE_Device max_sectors"
1220 ": %u, use force=1 to override.\n", dev, 1220 ": %u, use force=1 to override.\n", dev,
1221 max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1221 max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1222 return -EINVAL; 1222 return -EINVAL;
1223 } 1223 }
1224 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { 1224 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1225 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" 1225 pr_err("dev[%p]: Passed max_sectors: %u"
1226 " greater than DA_STATUS_MAX_SECTORS_MAX:" 1226 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1227 " %u\n", dev, max_sectors, 1227 " %u\n", dev, max_sectors,
1228 DA_STATUS_MAX_SECTORS_MAX); 1228 DA_STATUS_MAX_SECTORS_MAX);
@@ -1231,7 +1231,7 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1231 } 1231 }
1232 1232
1233 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; 1233 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
1234 printk("dev[%p]: SE Device max_sectors changed to %u\n", 1234 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1235 dev, max_sectors); 1235 dev, max_sectors);
1236 return 0; 1236 return 0;
1237} 1237}
@@ -1239,25 +1239,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1239int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1239int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1240{ 1240{
1241 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1241 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1242 printk(KERN_ERR "dev[%p]: Unable to change SE Device" 1242 pr_err("dev[%p]: Unable to change SE Device"
1243 " optimal_sectors while dev_export_obj: %d count exists\n", 1243 " optimal_sectors while dev_export_obj: %d count exists\n",
1244 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1244 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1245 return -EINVAL; 1245 return -EINVAL;
1246 } 1246 }
1247 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1247 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1248 printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" 1248 pr_err("dev[%p]: Passed optimal_sectors cannot be"
1249 " changed for TCM/pSCSI\n", dev); 1249 " changed for TCM/pSCSI\n", dev);
1250 return -EINVAL; 1250 return -EINVAL;
1251 } 1251 }
1252 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { 1252 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
1253 printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" 1253 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1254 " greater than max_sectors: %u\n", dev, 1254 " greater than max_sectors: %u\n", dev,
1255 optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); 1255 optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
1256 return -EINVAL; 1256 return -EINVAL;
1257 } 1257 }
1258 1258
1259 dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; 1259 dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
1260 printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", 1260 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1261 dev, optimal_sectors); 1261 dev, optimal_sectors);
1262 return 0; 1262 return 0;
1263} 1263}
@@ -1265,7 +1265,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1265int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1265int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1266{ 1266{
1267 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1267 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1268 printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" 1268 pr_err("dev[%p]: Unable to change SE Device block_size"
1269 " while dev_export_obj: %d count exists\n", dev, 1269 " while dev_export_obj: %d count exists\n", dev,
1270 atomic_read(&dev->dev_export_obj.obj_access_count)); 1270 atomic_read(&dev->dev_export_obj.obj_access_count));
1271 return -EINVAL; 1271 return -EINVAL;
@@ -1275,21 +1275,21 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1275 (block_size != 1024) && 1275 (block_size != 1024) &&
1276 (block_size != 2048) && 1276 (block_size != 2048) &&
1277 (block_size != 4096)) { 1277 (block_size != 4096)) {
1278 printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" 1278 pr_err("dev[%p]: Illegal value for block_device: %u"
1279 " for SE device, must be 512, 1024, 2048 or 4096\n", 1279 " for SE device, must be 512, 1024, 2048 or 4096\n",
1280 dev, block_size); 1280 dev, block_size);
1281 return -EINVAL; 1281 return -EINVAL;
1282 } 1282 }
1283 1283
1284 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1284 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1285 printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" 1285 pr_err("dev[%p]: Not allowed to change block_size for"
1286 " Physical Device, use for Linux/SCSI to change" 1286 " Physical Device, use for Linux/SCSI to change"
1287 " block_size for underlying hardware\n", dev); 1287 " block_size for underlying hardware\n", dev);
1288 return -EINVAL; 1288 return -EINVAL;
1289 } 1289 }
1290 1290
1291 dev->se_sub_dev->se_dev_attrib.block_size = block_size; 1291 dev->se_sub_dev->se_dev_attrib.block_size = block_size;
1292 printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", 1292 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1293 dev, block_size); 1293 dev, block_size);
1294 return 0; 1294 return 0;
1295} 1295}
@@ -1304,13 +1304,13 @@ struct se_lun *core_dev_add_lun(
1304 u32 lun_access = 0; 1304 u32 lun_access = 0;
1305 1305
1306 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { 1306 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1307 printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", 1307 pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
1308 atomic_read(&dev->dev_access_obj.obj_access_count)); 1308 atomic_read(&dev->dev_access_obj.obj_access_count));
1309 return NULL; 1309 return NULL;
1310 } 1310 }
1311 1311
1312 lun_p = core_tpg_pre_addlun(tpg, lun); 1312 lun_p = core_tpg_pre_addlun(tpg, lun);
1313 if ((IS_ERR(lun_p)) || !(lun_p)) 1313 if ((IS_ERR(lun_p)) || !lun_p)
1314 return NULL; 1314 return NULL;
1315 1315
1316 if (dev->dev_flags & DF_READ_ONLY) 1316 if (dev->dev_flags & DF_READ_ONLY)
@@ -1321,7 +1321,7 @@ struct se_lun *core_dev_add_lun(
1321 if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) 1321 if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
1322 return NULL; 1322 return NULL;
1323 1323
1324 printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1324 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1325 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1325 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1326 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, 1326 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
1327 tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); 1327 tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
@@ -1357,12 +1357,12 @@ int core_dev_del_lun(
1357 int ret = 0; 1357 int ret = 0;
1358 1358
1359 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); 1359 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
1360 if (!(lun)) 1360 if (!lun)
1361 return ret; 1361 return ret;
1362 1362
1363 core_tpg_post_dellun(tpg, lun); 1363 core_tpg_post_dellun(tpg, lun);
1364 1364
1365 printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" 1365 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1366 " device object\n", tpg->se_tpg_tfo->get_fabric_name(), 1366 " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1367 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, 1367 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
1368 tpg->se_tpg_tfo->get_fabric_name()); 1368 tpg->se_tpg_tfo->get_fabric_name());
@@ -1376,7 +1376,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
1376 1376
1377 spin_lock(&tpg->tpg_lun_lock); 1377 spin_lock(&tpg->tpg_lun_lock);
1378 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1378 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1379 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" 1379 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1380 "_PER_TPG-1: %u for Target Portal Group: %hu\n", 1380 "_PER_TPG-1: %u for Target Portal Group: %hu\n",
1381 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1381 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1382 TRANSPORT_MAX_LUNS_PER_TPG-1, 1382 TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -1387,7 +1387,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
1387 lun = &tpg->tpg_lun_list[unpacked_lun]; 1387 lun = &tpg->tpg_lun_list[unpacked_lun];
1388 1388
1389 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { 1389 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1390 printk(KERN_ERR "%s Logical Unit Number: %u is not free on" 1390 pr_err("%s Logical Unit Number: %u is not free on"
1391 " Target Portal Group: %hu, ignoring request.\n", 1391 " Target Portal Group: %hu, ignoring request.\n",
1392 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1392 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1393 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1393 tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -1409,7 +1409,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
1409 1409
1410 spin_lock(&tpg->tpg_lun_lock); 1410 spin_lock(&tpg->tpg_lun_lock);
1411 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1411 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1412 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" 1412 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1413 "_TPG-1: %u for Target Portal Group: %hu\n", 1413 "_TPG-1: %u for Target Portal Group: %hu\n",
1414 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1414 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1415 TRANSPORT_MAX_LUNS_PER_TPG-1, 1415 TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -1420,7 +1420,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
1420 lun = &tpg->tpg_lun_list[unpacked_lun]; 1420 lun = &tpg->tpg_lun_list[unpacked_lun];
1421 1421
1422 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 1422 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1423 printk(KERN_ERR "%s Logical Unit Number: %u is not active on" 1423 pr_err("%s Logical Unit Number: %u is not active on"
1424 " Target Portal Group: %hu, ignoring request.\n", 1424 " Target Portal Group: %hu, ignoring request.\n",
1425 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1425 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1426 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1426 tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -1442,19 +1442,19 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1442 struct se_node_acl *nacl; 1442 struct se_node_acl *nacl;
1443 1443
1444 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { 1444 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1445 printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", 1445 pr_err("%s InitiatorName exceeds maximum size.\n",
1446 tpg->se_tpg_tfo->get_fabric_name()); 1446 tpg->se_tpg_tfo->get_fabric_name());
1447 *ret = -EOVERFLOW; 1447 *ret = -EOVERFLOW;
1448 return NULL; 1448 return NULL;
1449 } 1449 }
1450 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 1450 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1451 if (!(nacl)) { 1451 if (!nacl) {
1452 *ret = -EINVAL; 1452 *ret = -EINVAL;
1453 return NULL; 1453 return NULL;
1454 } 1454 }
1455 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); 1455 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1456 if (!(lacl)) { 1456 if (!lacl) {
1457 printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); 1457 pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1458 *ret = -ENOMEM; 1458 *ret = -ENOMEM;
1459 return NULL; 1459 return NULL;
1460 } 1460 }
@@ -1477,8 +1477,8 @@ int core_dev_add_initiator_node_lun_acl(
1477 struct se_node_acl *nacl; 1477 struct se_node_acl *nacl;
1478 1478
1479 lun = core_dev_get_lun(tpg, unpacked_lun); 1479 lun = core_dev_get_lun(tpg, unpacked_lun);
1480 if (!(lun)) { 1480 if (!lun) {
1481 printk(KERN_ERR "%s Logical Unit Number: %u is not active on" 1481 pr_err("%s Logical Unit Number: %u is not active on"
1482 " Target Portal Group: %hu, ignoring request.\n", 1482 " Target Portal Group: %hu, ignoring request.\n",
1483 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1483 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1484 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1484 tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -1486,7 +1486,7 @@ int core_dev_add_initiator_node_lun_acl(
1486 } 1486 }
1487 1487
1488 nacl = lacl->se_lun_nacl; 1488 nacl = lacl->se_lun_nacl;
1489 if (!(nacl)) 1489 if (!nacl)
1490 return -EINVAL; 1490 return -EINVAL;
1491 1491
1492 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && 1492 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
@@ -1505,7 +1505,7 @@ int core_dev_add_initiator_node_lun_acl(
1505 smp_mb__after_atomic_inc(); 1505 smp_mb__after_atomic_inc();
1506 spin_unlock(&lun->lun_acl_lock); 1506 spin_unlock(&lun->lun_acl_lock);
1507 1507
1508 printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " 1508 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
1509 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 1509 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1510 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, 1510 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1511 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", 1511 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
@@ -1530,7 +1530,7 @@ int core_dev_del_initiator_node_lun_acl(
1530 struct se_node_acl *nacl; 1530 struct se_node_acl *nacl;
1531 1531
1532 nacl = lacl->se_lun_nacl; 1532 nacl = lacl->se_lun_nacl;
1533 if (!(nacl)) 1533 if (!nacl)
1534 return -EINVAL; 1534 return -EINVAL;
1535 1535
1536 spin_lock(&lun->lun_acl_lock); 1536 spin_lock(&lun->lun_acl_lock);
@@ -1544,7 +1544,7 @@ int core_dev_del_initiator_node_lun_acl(
1544 1544
1545 lacl->se_lun = NULL; 1545 lacl->se_lun = NULL;
1546 1546
1547 printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" 1547 pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1548 " InitiatorNode: %s Mapped LUN: %u\n", 1548 " InitiatorNode: %s Mapped LUN: %u\n",
1549 tpg->se_tpg_tfo->get_fabric_name(), 1549 tpg->se_tpg_tfo->get_fabric_name(),
1550 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 1550 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@@ -1557,7 +1557,7 @@ void core_dev_free_initiator_node_lun_acl(
1557 struct se_portal_group *tpg, 1557 struct se_portal_group *tpg,
1558 struct se_lun_acl *lacl) 1558 struct se_lun_acl *lacl)
1559{ 1559{
1560 printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 1560 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1561 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1561 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1562 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1562 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1563 tpg->se_tpg_tfo->get_fabric_name(), 1563 tpg->se_tpg_tfo->get_fabric_name(),
@@ -1575,7 +1575,7 @@ int core_dev_setup_virtual_lun0(void)
1575 char buf[16]; 1575 char buf[16];
1576 int ret; 1576 int ret;
1577 1577
1578 hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); 1578 hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1579 if (IS_ERR(hba)) 1579 if (IS_ERR(hba))
1580 return PTR_ERR(hba); 1580 return PTR_ERR(hba);
1581 1581
@@ -1583,8 +1583,8 @@ int core_dev_setup_virtual_lun0(void)
1583 t = hba->transport; 1583 t = hba->transport;
1584 1584
1585 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 1585 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1586 if (!(se_dev)) { 1586 if (!se_dev) {
1587 printk(KERN_ERR "Unable to allocate memory for" 1587 pr_err("Unable to allocate memory for"
1588 " struct se_subsystem_dev\n"); 1588 " struct se_subsystem_dev\n");
1589 ret = -ENOMEM; 1589 ret = -ENOMEM;
1590 goto out; 1590 goto out;
@@ -1606,8 +1606,8 @@ int core_dev_setup_virtual_lun0(void)
1606 se_dev->se_dev_hba = hba; 1606 se_dev->se_dev_hba = hba;
1607 1607
1608 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); 1608 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1609 if (!(se_dev->se_dev_su_ptr)) { 1609 if (!se_dev->se_dev_su_ptr) {
1610 printk(KERN_ERR "Unable to locate subsystem dependent pointer" 1610 pr_err("Unable to locate subsystem dependent pointer"
1611 " from allocate_virtdevice()\n"); 1611 " from allocate_virtdevice()\n");
1612 ret = -ENOMEM; 1612 ret = -ENOMEM;
1613 goto out; 1613 goto out;
@@ -1643,7 +1643,7 @@ void core_dev_release_virtual_lun0(void)
1643 struct se_hba *hba = lun0_hba; 1643 struct se_hba *hba = lun0_hba;
1644 struct se_subsystem_dev *su_dev = lun0_su_dev; 1644 struct se_subsystem_dev *su_dev = lun0_su_dev;
1645 1645
1646 if (!(hba)) 1646 if (!hba)
1647 return; 1647 return;
1648 1648
1649 if (g_lun0_dev) 1649 if (g_lun0_dev)
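
The hunks above are representative of the whole series: every printk(KERN_ERR ...) becomes pr_err(), and the informational printk(KERN_INFO ...) and bare printk() chatter becomes pr_debug(), which prints nothing on a normal build but can be switched on per call site under CONFIG_DYNAMIC_DEBUG. The one functional change in this file is core_dev_setup_virtual_lun0() moving from the removed "rd_dr" direct ramdisk backend to "rd_mcp". A minimal sketch of the logging pattern follows; example_set_flag() is an illustrative helper, not a function from the patch:

#include <linux/errno.h>
#include <linux/printk.h>

/* Sketch of the printk -> pr_err()/pr_debug() conversion applied
 * throughout this patch. pr_err() keeps unconditional error reporting;
 * pr_debug() compiles to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG
 * is in effect.
 */
static int example_set_flag(void *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);	/* was printk(KERN_ERR ...) */
		return -EINVAL;
	}
	pr_debug("dev[%p]: flag set to %d\n", dev, flag); /* was printk(KERN_INFO ...) */
	return 0;
}

/* With CONFIG_DYNAMIC_DEBUG=y the converted pr_debug() sites can be
 * enabled at runtime through debugfs, e.g.:
 *   echo 'file target_core_device.c +p' > /sys/kernel/debug/dynamic_debug/control
 */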
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 0b1659d0fefc..f1654694f4ea 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -60,7 +60,7 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
60 cit->ct_group_ops = _group_ops; \ 60 cit->ct_group_ops = _group_ops; \
61 cit->ct_attrs = _attrs; \ 61 cit->ct_attrs = _attrs; \
62 cit->ct_owner = tf->tf_module; \ 62 cit->ct_owner = tf->tf_module; \
63 printk("Setup generic %s\n", __stringify(_name)); \ 63 pr_debug("Setup generic %s\n", __stringify(_name)); \
64} 64}
65 65
66/* Start of tfc_tpg_mappedlun_cit */ 66/* Start of tfc_tpg_mappedlun_cit */
@@ -80,8 +80,8 @@ static int target_fabric_mappedlun_link(
80 /* 80 /*
81 * Ensure that the source port exists 81 * Ensure that the source port exists
82 */ 82 */
83 if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) { 83 if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
84 printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep" 84 pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
85 "_tpg does not exist\n"); 85 "_tpg does not exist\n");
86 return -EINVAL; 86 return -EINVAL;
87 } 87 }
@@ -96,12 +96,12 @@ static int target_fabric_mappedlun_link(
96 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT 96 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
97 */ 97 */
98 if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { 98 if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
99 printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n", 99 pr_err("Illegal Initiator ACL SymLink outside of %s\n",
100 config_item_name(wwn_ci)); 100 config_item_name(wwn_ci));
101 return -EINVAL; 101 return -EINVAL;
102 } 102 }
103 if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { 103 if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
104 printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s" 104 pr_err("Illegal Initiator ACL Symlink outside of %s"
105 " TPGT: %s\n", config_item_name(wwn_ci), 105 " TPGT: %s\n", config_item_name(wwn_ci),
106 config_item_name(tpg_ci)); 106 config_item_name(tpg_ci));
107 return -EINVAL; 107 return -EINVAL;
@@ -147,7 +147,7 @@ static int target_fabric_mappedlun_unlink(
147 /* 147 /*
148 * Determine if the underlying MappedLUN has already been released.. 148 * Determine if the underlying MappedLUN has already been released..
149 */ 149 */
150 if (!(deve->se_lun)) 150 if (!deve->se_lun)
151 return 0; 151 return 0;
152 152
153 lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); 153 lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
@@ -202,7 +202,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
202 TRANSPORT_LUNFLAGS_READ_WRITE, 202 TRANSPORT_LUNFLAGS_READ_WRITE,
203 lacl->se_lun_nacl); 203 lacl->se_lun_nacl);
204 204
205 printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" 205 pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
206 " Mapped LUN: %u Write Protect bit to %s\n", 206 " Mapped LUN: %u Write Protect bit to %s\n",
207 se_tpg->se_tpg_tfo->get_fabric_name(), 207 se_tpg->se_tpg_tfo->get_fabric_name(),
208 lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); 208 lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
@@ -327,14 +327,14 @@ static struct config_group *target_fabric_make_mappedlun(
327 int ret = 0; 327 int ret = 0;
328 328
329 acl_ci = &group->cg_item; 329 acl_ci = &group->cg_item;
330 if (!(acl_ci)) { 330 if (!acl_ci) {
331 printk(KERN_ERR "Unable to locatel acl_ci\n"); 331 pr_err("Unable to locatel acl_ci\n");
332 return NULL; 332 return NULL;
333 } 333 }
334 334
335 buf = kzalloc(strlen(name) + 1, GFP_KERNEL); 335 buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
336 if (!(buf)) { 336 if (!buf) {
337 printk(KERN_ERR "Unable to allocate memory for name buf\n"); 337 pr_err("Unable to allocate memory for name buf\n");
338 return ERR_PTR(-ENOMEM); 338 return ERR_PTR(-ENOMEM);
339 } 339 }
340 snprintf(buf, strlen(name) + 1, "%s", name); 340 snprintf(buf, strlen(name) + 1, "%s", name);
@@ -342,7 +342,7 @@ static struct config_group *target_fabric_make_mappedlun(
342 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. 342 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
343 */ 343 */
344 if (strstr(buf, "lun_") != buf) { 344 if (strstr(buf, "lun_") != buf) {
345 printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s" 345 pr_err("Unable to locate \"lun_\" from buf: %s"
346 " name: %s\n", buf, name); 346 " name: %s\n", buf, name);
347 ret = -EINVAL; 347 ret = -EINVAL;
348 goto out; 348 goto out;
@@ -358,7 +358,7 @@ static struct config_group *target_fabric_make_mappedlun(
358 358
359 lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, 359 lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
360 config_item_name(acl_ci), &ret); 360 config_item_name(acl_ci), &ret);
361 if (!(lacl)) { 361 if (!lacl) {
362 ret = -EINVAL; 362 ret = -EINVAL;
363 goto out; 363 goto out;
364 } 364 }
@@ -367,7 +367,7 @@ static struct config_group *target_fabric_make_mappedlun(
367 lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 367 lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
368 GFP_KERNEL); 368 GFP_KERNEL);
369 if (!lacl_cg->default_groups) { 369 if (!lacl_cg->default_groups) {
370 printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n"); 370 pr_err("Unable to allocate lacl_cg->default_groups\n");
371 ret = -ENOMEM; 371 ret = -ENOMEM;
372 goto out; 372 goto out;
373 } 373 }
@@ -383,7 +383,7 @@ static struct config_group *target_fabric_make_mappedlun(
383 ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 383 ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
384 GFP_KERNEL); 384 GFP_KERNEL);
385 if (!ml_stat_grp->default_groups) { 385 if (!ml_stat_grp->default_groups) {
386 printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n"); 386 pr_err("Unable to allocate ml_stat_grp->default_groups\n");
387 ret = -ENOMEM; 387 ret = -ENOMEM;
388 goto out; 388 goto out;
389 } 389 }
@@ -474,8 +474,8 @@ static struct config_group *target_fabric_make_nodeacl(
474 struct se_node_acl *se_nacl; 474 struct se_node_acl *se_nacl;
475 struct config_group *nacl_cg; 475 struct config_group *nacl_cg;
476 476
477 if (!(tf->tf_ops.fabric_make_nodeacl)) { 477 if (!tf->tf_ops.fabric_make_nodeacl) {
478 printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n"); 478 pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
479 return ERR_PTR(-ENOSYS); 479 return ERR_PTR(-ENOSYS);
480 } 480 }
481 481
@@ -572,13 +572,13 @@ static struct config_group *target_fabric_make_np(
572 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 572 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
573 struct se_tpg_np *se_tpg_np; 573 struct se_tpg_np *se_tpg_np;
574 574
575 if (!(tf->tf_ops.fabric_make_np)) { 575 if (!tf->tf_ops.fabric_make_np) {
576 printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n"); 576 pr_err("tf->tf_ops.fabric_make_np is NULL\n");
577 return ERR_PTR(-ENOSYS); 577 return ERR_PTR(-ENOSYS);
578 } 578 }
579 579
580 se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); 580 se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
581 if (!(se_tpg_np) || IS_ERR(se_tpg_np)) 581 if (!se_tpg_np || IS_ERR(se_tpg_np))
582 return ERR_PTR(-EINVAL); 582 return ERR_PTR(-EINVAL);
583 583
584 se_tpg_np->tpg_np_parent = se_tpg; 584 se_tpg_np->tpg_np_parent = se_tpg;
@@ -627,10 +627,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
627 struct se_lun *lun, 627 struct se_lun *lun,
628 char *page) 628 char *page)
629{ 629{
630 if (!(lun)) 630 if (!lun || !lun->lun_sep)
631 return -ENODEV;
632
633 if (!(lun->lun_sep))
634 return -ENODEV; 631 return -ENODEV;
635 632
636 return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); 633 return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
@@ -641,10 +638,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
641 const char *page, 638 const char *page,
642 size_t count) 639 size_t count)
643{ 640{
644 if (!(lun)) 641 if (!lun || !lun->lun_sep)
645 return -ENODEV;
646
647 if (!(lun->lun_sep))
648 return -ENODEV; 642 return -ENODEV;
649 643
650 return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); 644 return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
@@ -659,10 +653,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
659 struct se_lun *lun, 653 struct se_lun *lun,
660 char *page) 654 char *page)
661{ 655{
662 if (!(lun)) 656 if (!lun || !lun->lun_sep)
663 return -ENODEV;
664
665 if (!(lun->lun_sep))
666 return -ENODEV; 657 return -ENODEV;
667 658
668 return core_alua_show_offline_bit(lun, page); 659 return core_alua_show_offline_bit(lun, page);
@@ -673,10 +664,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
673 const char *page, 664 const char *page,
674 size_t count) 665 size_t count)
675{ 666{
676 if (!(lun)) 667 if (!lun || !lun->lun_sep)
677 return -ENODEV;
678
679 if (!(lun->lun_sep))
680 return -ENODEV; 668 return -ENODEV;
681 669
682 return core_alua_store_offline_bit(lun, page, count); 670 return core_alua_store_offline_bit(lun, page, count);
@@ -691,10 +679,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
691 struct se_lun *lun, 679 struct se_lun *lun,
692 char *page) 680 char *page)
693{ 681{
694 if (!(lun)) 682 if (!lun || !lun->lun_sep)
695 return -ENODEV;
696
697 if (!(lun->lun_sep))
698 return -ENODEV; 683 return -ENODEV;
699 684
700 return core_alua_show_secondary_status(lun, page); 685 return core_alua_show_secondary_status(lun, page);
@@ -705,10 +690,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
705 const char *page, 690 const char *page,
706 size_t count) 691 size_t count)
707{ 692{
708 if (!(lun)) 693 if (!lun || !lun->lun_sep)
709 return -ENODEV;
710
711 if (!(lun->lun_sep))
712 return -ENODEV; 694 return -ENODEV;
713 695
714 return core_alua_store_secondary_status(lun, page, count); 696 return core_alua_store_secondary_status(lun, page, count);
@@ -723,10 +705,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
723 struct se_lun *lun, 705 struct se_lun *lun,
724 char *page) 706 char *page)
725{ 707{
726 if (!(lun)) 708 if (!lun || !lun->lun_sep)
727 return -ENODEV;
728
729 if (!(lun->lun_sep))
730 return -ENODEV; 709 return -ENODEV;
731 710
732 return core_alua_show_secondary_write_metadata(lun, page); 711 return core_alua_show_secondary_write_metadata(lun, page);
@@ -737,10 +716,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
737 const char *page, 716 const char *page,
738 size_t count) 717 size_t count)
739{ 718{
740 if (!(lun)) 719 if (!lun || !lun->lun_sep)
741 return -ENODEV;
742
743 if (!(lun->lun_sep))
744 return -ENODEV; 720 return -ENODEV;
745 721
746 return core_alua_store_secondary_write_metadata(lun, page, count); 722 return core_alua_store_secondary_write_metadata(lun, page, count);
@@ -781,13 +757,13 @@ static int target_fabric_port_link(
781 tf = se_tpg->se_tpg_wwn->wwn_tf; 757 tf = se_tpg->se_tpg_wwn->wwn_tf;
782 758
783 if (lun->lun_se_dev != NULL) { 759 if (lun->lun_se_dev != NULL) {
784 printk(KERN_ERR "Port Symlink already exists\n"); 760 pr_err("Port Symlink already exists\n");
785 return -EEXIST; 761 return -EEXIST;
786 } 762 }
787 763
788 dev = se_dev->se_dev_ptr; 764 dev = se_dev->se_dev_ptr;
789 if (!(dev)) { 765 if (!dev) {
790 printk(KERN_ERR "Unable to locate struct se_device pointer from" 766 pr_err("Unable to locate struct se_device pointer from"
791 " %s\n", config_item_name(se_dev_ci)); 767 " %s\n", config_item_name(se_dev_ci));
792 ret = -ENODEV; 768 ret = -ENODEV;
793 goto out; 769 goto out;
@@ -795,8 +771,8 @@ static int target_fabric_port_link(
795 771
796 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, 772 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
797 lun->unpacked_lun); 773 lun->unpacked_lun);
798 if ((IS_ERR(lun_p)) || !(lun_p)) { 774 if (IS_ERR(lun_p) || !lun_p) {
799 printk(KERN_ERR "core_dev_add_lun() failed\n"); 775 pr_err("core_dev_add_lun() failed\n");
800 ret = -EINVAL; 776 ret = -EINVAL;
801 goto out; 777 goto out;
802 } 778 }
@@ -888,7 +864,7 @@ static struct config_group *target_fabric_make_lun(
888 int errno; 864 int errno;
889 865
890 if (strstr(name, "lun_") != name) { 866 if (strstr(name, "lun_") != name) {
891 printk(KERN_ERR "Unable to locate \'_\" in" 867 pr_err("Unable to locate \'_\" in"
892 " \"lun_$LUN_NUMBER\"\n"); 868 " \"lun_$LUN_NUMBER\"\n");
893 return ERR_PTR(-EINVAL); 869 return ERR_PTR(-EINVAL);
894 } 870 }
@@ -896,14 +872,14 @@ static struct config_group *target_fabric_make_lun(
896 return ERR_PTR(-EINVAL); 872 return ERR_PTR(-EINVAL);
897 873
898 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); 874 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
899 if (!(lun)) 875 if (!lun)
900 return ERR_PTR(-EINVAL); 876 return ERR_PTR(-EINVAL);
901 877
902 lun_cg = &lun->lun_group; 878 lun_cg = &lun->lun_group;
903 lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 879 lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
904 GFP_KERNEL); 880 GFP_KERNEL);
905 if (!lun_cg->default_groups) { 881 if (!lun_cg->default_groups) {
906 printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n"); 882 pr_err("Unable to allocate lun_cg->default_groups\n");
907 return ERR_PTR(-ENOMEM); 883 return ERR_PTR(-ENOMEM);
908 } 884 }
909 885
@@ -918,7 +894,7 @@ static struct config_group *target_fabric_make_lun(
918 port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 894 port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
919 GFP_KERNEL); 895 GFP_KERNEL);
920 if (!port_stat_grp->default_groups) { 896 if (!port_stat_grp->default_groups) {
921 printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n"); 897 pr_err("Unable to allocate port_stat_grp->default_groups\n");
922 errno = -ENOMEM; 898 errno = -ENOMEM;
923 goto out; 899 goto out;
924 } 900 }
@@ -1031,13 +1007,13 @@ static struct config_group *target_fabric_make_tpg(
1031 struct target_fabric_configfs *tf = wwn->wwn_tf; 1007 struct target_fabric_configfs *tf = wwn->wwn_tf;
1032 struct se_portal_group *se_tpg; 1008 struct se_portal_group *se_tpg;
1033 1009
1034 if (!(tf->tf_ops.fabric_make_tpg)) { 1010 if (!tf->tf_ops.fabric_make_tpg) {
1035 printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n"); 1011 pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
1036 return ERR_PTR(-ENOSYS); 1012 return ERR_PTR(-ENOSYS);
1037 } 1013 }
1038 1014
1039 se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); 1015 se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
1040 if (!(se_tpg) || IS_ERR(se_tpg)) 1016 if (!se_tpg || IS_ERR(se_tpg))
1041 return ERR_PTR(-EINVAL); 1017 return ERR_PTR(-EINVAL);
1042 /* 1018 /*
1043 * Setup default groups from pre-allocated se_tpg->tpg_default_groups 1019 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
@@ -1130,13 +1106,13 @@ static struct config_group *target_fabric_make_wwn(
1130 struct target_fabric_configfs, tf_group); 1106 struct target_fabric_configfs, tf_group);
1131 struct se_wwn *wwn; 1107 struct se_wwn *wwn;
1132 1108
1133 if (!(tf->tf_ops.fabric_make_wwn)) { 1109 if (!tf->tf_ops.fabric_make_wwn) {
1134 printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n"); 1110 pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
1135 return ERR_PTR(-ENOSYS); 1111 return ERR_PTR(-ENOSYS);
1136 } 1112 }
1137 1113
1138 wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); 1114 wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
1139 if (!(wwn) || IS_ERR(wwn)) 1115 if (!wwn || IS_ERR(wwn))
1140 return ERR_PTR(-EINVAL); 1116 return ERR_PTR(-EINVAL);
1141 1117
1142 wwn->wwn_tf = tf; 1118 wwn->wwn_tf = tf;
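
Two mechanical cleanups recur through this file alongside the pr_err()/pr_debug() conversion: redundant parentheses around the operand of '!' are dropped (if (!(ptr)) becomes if (!ptr)), and back-to-back NULL checks that return the same error are folded into one short-circuit condition, as in the six ALUA port attribute handlers above. A sketch of the folded form, mirroring target_fabric_port_show_attr_alua_tg_pt_gp() from the patch (the se_lun and ALUA declarations come from the target_core headers):

/* Because || short-circuits, lun->lun_sep is only dereferenced once
 * lun itself is known to be non-NULL, so folding the two checks is
 * behaviour-preserving.
 */
static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
	struct se_lun *lun,
	char *page)
{
	if (!lun || !lun->lun_sep)
		return -ENODEV;

	return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
}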
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 1e193f324895..a29968806cfc 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -172,7 +172,7 @@ u32 fc_get_pr_transport_id(
172 ptr = &se_nacl->initiatorname[0]; 172 ptr = &se_nacl->initiatorname[0];
173 173
174 for (i = 0; i < 24; ) { 174 for (i = 0; i < 24; ) {
175 if (!(strncmp(&ptr[i], ":", 1))) { 175 if (!strncmp(&ptr[i], ":", 1)) {
176 i++; 176 i++;
177 continue; 177 continue;
178 } 178 }
@@ -386,7 +386,7 @@ char *iscsi_parse_pr_out_transport_id(
386 * Reserved 386 * Reserved
387 */ 387 */
388 if ((format_code != 0x00) && (format_code != 0x40)) { 388 if ((format_code != 0x00) && (format_code != 0x40)) {
389 printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" 389 pr_err("Illegal format code: 0x%02x for iSCSI"
390 " Initiator Transport ID\n", format_code); 390 " Initiator Transport ID\n", format_code);
391 return NULL; 391 return NULL;
392 } 392 }
@@ -406,7 +406,7 @@ char *iscsi_parse_pr_out_transport_id(
406 tid_len += padding; 406 tid_len += padding;
407 407
408 if ((add_len + 4) != tid_len) { 408 if ((add_len + 4) != tid_len) {
409 printk(KERN_INFO "LIO-Target Extracted add_len: %hu " 409 pr_debug("LIO-Target Extracted add_len: %hu "
410 "does not match calculated tid_len: %u," 410 "does not match calculated tid_len: %u,"
411 " using tid_len instead\n", add_len+4, tid_len); 411 " using tid_len instead\n", add_len+4, tid_len);
412 *out_tid_len = tid_len; 412 *out_tid_len = tid_len;
@@ -420,8 +420,8 @@ char *iscsi_parse_pr_out_transport_id(
420 */ 420 */
421 if (format_code == 0x40) { 421 if (format_code == 0x40) {
422 p = strstr((char *)&buf[4], ",i,0x"); 422 p = strstr((char *)&buf[4], ",i,0x");
423 if (!(p)) { 423 if (!p) {
424 printk(KERN_ERR "Unable to locate \",i,0x\" seperator" 424 pr_err("Unable to locate \",i,0x\" seperator"
425 " for Initiator port identifier: %s\n", 425 " for Initiator port identifier: %s\n",
426 (char *)&buf[4]); 426 (char *)&buf[4]);
427 return NULL; 427 return NULL;
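
The fabric library changes are the same conversions in miniature; the only subtlety is in fc_get_pr_transport_id(), where the separator test that loses its extra parentheses is a one-character compare. A hedged sketch of that loop's shape, assuming the initiatorname is a colon-separated hex WWPN such as "21:00:00:e0:8b:05:05:04"; the hex conversion step is elided:

/* !strncmp(&ptr[i], ":", 1) compares a single byte, i.e. it is
 * equivalent to ptr[i] == ':'.
 */
for (i = 0; i < 24; ) {
	if (ptr[i] == ':') {
		i++;
		continue;
	}
	/* ... the hex digits of the WWPN are converted and packed here ... */
	i++;
}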
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 5c47f4202386..bc1b33639b8d 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -42,18 +42,6 @@
42 42
43#include "target_core_file.h" 43#include "target_core_file.h"
44 44
45#if 1
46#define DEBUG_FD_CACHE(x...) printk(x)
47#else
48#define DEBUG_FD_CACHE(x...)
49#endif
50
51#if 1
52#define DEBUG_FD_FUA(x...) printk(x)
53#else
54#define DEBUG_FD_FUA(x...)
55#endif
56
57static struct se_subsystem_api fileio_template; 45static struct se_subsystem_api fileio_template;
58 46
59/* fd_attach_hba(): (Part of se_subsystem_api_t template) 47/* fd_attach_hba(): (Part of se_subsystem_api_t template)
@@ -65,8 +53,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
65 struct fd_host *fd_host; 53 struct fd_host *fd_host;
66 54
67 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); 55 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
68 if (!(fd_host)) { 56 if (!fd_host) {
69 printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); 57 pr_err("Unable to allocate memory for struct fd_host\n");
70 return -ENOMEM; 58 return -ENOMEM;
71 } 59 }
72 60
@@ -74,10 +62,10 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
74 62
75 hba->hba_ptr = fd_host; 63 hba->hba_ptr = fd_host;
76 64
77 printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 65 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
78 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 66 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
79 TARGET_CORE_MOD_VERSION); 67 TARGET_CORE_MOD_VERSION);
80 printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 68 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
81 " MaxSectors: %u\n", 69 " MaxSectors: %u\n",
82 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); 70 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
83 71
@@ -88,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba)
88{ 76{
89 struct fd_host *fd_host = hba->hba_ptr; 77 struct fd_host *fd_host = hba->hba_ptr;
90 78
91 printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" 79 pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
92 " Target Core\n", hba->hba_id, fd_host->fd_host_id); 80 " Target Core\n", hba->hba_id, fd_host->fd_host_id);
93 81
94 kfree(fd_host); 82 kfree(fd_host);
@@ -101,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
101 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; 89 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
102 90
103 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); 91 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
104 if (!(fd_dev)) { 92 if (!fd_dev) {
105 printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n"); 93 pr_err("Unable to allocate memory for struct fd_dev\n");
106 return NULL; 94 return NULL;
107 } 95 }
108 96
109 fd_dev->fd_host = fd_host; 97 fd_dev->fd_host = fd_host;
110 98
111 printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name); 99 pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
112 100
113 return fd_dev; 101 return fd_dev;
114} 102}
@@ -141,7 +129,7 @@ static struct se_device *fd_create_virtdevice(
141 set_fs(old_fs); 129 set_fs(old_fs);
142 130
143 if (IS_ERR(dev_p)) { 131 if (IS_ERR(dev_p)) {
144 printk(KERN_ERR "getname(%s) failed: %lu\n", 132 pr_err("getname(%s) failed: %lu\n",
145 fd_dev->fd_dev_name, IS_ERR(dev_p)); 133 fd_dev->fd_dev_name, IS_ERR(dev_p));
146 ret = PTR_ERR(dev_p); 134 ret = PTR_ERR(dev_p);
147 goto fail; 135 goto fail;
@@ -164,12 +152,12 @@ static struct se_device *fd_create_virtdevice(
164 152
165 file = filp_open(dev_p, flags, 0600); 153 file = filp_open(dev_p, flags, 0600);
166 if (IS_ERR(file)) { 154 if (IS_ERR(file)) {
167 printk(KERN_ERR "filp_open(%s) failed\n", dev_p); 155 pr_err("filp_open(%s) failed\n", dev_p);
168 ret = PTR_ERR(file); 156 ret = PTR_ERR(file);
169 goto fail; 157 goto fail;
170 } 158 }
171 if (!file || !file->f_dentry) { 159 if (!file || !file->f_dentry) {
172 printk(KERN_ERR "filp_open(%s) failed\n", dev_p); 160 pr_err("filp_open(%s) failed\n", dev_p);
173 goto fail; 161 goto fail;
174 } 162 }
175 fd_dev->fd_file = file; 163 fd_dev->fd_file = file;
@@ -199,14 +187,14 @@ static struct se_device *fd_create_virtdevice(
199 fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - 187 fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
200 fd_dev->fd_block_size); 188 fd_dev->fd_block_size);
201 189
202 printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct" 190 pr_debug("FILEIO: Using size: %llu bytes from struct"
203 " block_device blocks: %llu logical_block_size: %d\n", 191 " block_device blocks: %llu logical_block_size: %d\n",
204 fd_dev->fd_dev_size, 192 fd_dev->fd_dev_size,
205 div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), 193 div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
206 fd_dev->fd_block_size); 194 fd_dev->fd_block_size);
207 } else { 195 } else {
208 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { 196 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
209 printk(KERN_ERR "FILEIO: Missing fd_dev_size=" 197 pr_err("FILEIO: Missing fd_dev_size="
210 " parameter, and no backing struct" 198 " parameter, and no backing struct"
211 " block_device\n"); 199 " block_device\n");
212 goto fail; 200 goto fail;
@@ -225,13 +213,13 @@ static struct se_device *fd_create_virtdevice(
225 dev = transport_add_device_to_core_hba(hba, &fileio_template, 213 dev = transport_add_device_to_core_hba(hba, &fileio_template,
226 se_dev, dev_flags, fd_dev, 214 se_dev, dev_flags, fd_dev,
227 &dev_limits, "FILEIO", FD_VERSION); 215 &dev_limits, "FILEIO", FD_VERSION);
228 if (!(dev)) 216 if (!dev)
229 goto fail; 217 goto fail;
230 218
231 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 219 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
232 fd_dev->fd_queue_depth = dev->queue_depth; 220 fd_dev->fd_queue_depth = dev->queue_depth;
233 221
234 printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 222 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
235 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, 223 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
236 fd_dev->fd_dev_name, fd_dev->fd_dev_size); 224 fd_dev->fd_dev_name, fd_dev->fd_dev_size);
237 225
@@ -269,25 +257,24 @@ static inline struct fd_request *FILE_REQ(struct se_task *task)
269 257
270 258
271static struct se_task * 259static struct se_task *
272fd_alloc_task(struct se_cmd *cmd) 260fd_alloc_task(unsigned char *cdb)
273{ 261{
274 struct fd_request *fd_req; 262 struct fd_request *fd_req;
275 263
276 fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); 264 fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
277 if (!(fd_req)) { 265 if (!fd_req) {
278 printk(KERN_ERR "Unable to allocate struct fd_request\n"); 266 pr_err("Unable to allocate struct fd_request\n");
279 return NULL; 267 return NULL;
280 } 268 }
281 269
282 fd_req->fd_dev = cmd->se_dev->dev_ptr;
283
284 return &fd_req->fd_task; 270 return &fd_req->fd_task;
285} 271}
286 272
287static int fd_do_readv(struct se_task *task) 273static int fd_do_readv(struct se_task *task)
288{ 274{
289 struct fd_request *req = FILE_REQ(task); 275 struct fd_request *req = FILE_REQ(task);
290 struct file *fd = req->fd_dev->fd_file; 276 struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
277 struct file *fd = dev->fd_file;
291 struct scatterlist *sg = task->task_sg; 278 struct scatterlist *sg = task->task_sg;
292 struct iovec *iov; 279 struct iovec *iov;
293 mm_segment_t old_fs; 280 mm_segment_t old_fs;
@@ -295,20 +282,20 @@ static int fd_do_readv(struct se_task *task)
295 task->se_dev->se_sub_dev->se_dev_attrib.block_size); 282 task->se_dev->se_sub_dev->se_dev_attrib.block_size);
296 int ret = 0, i; 283 int ret = 0, i;
297 284
298 iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); 285 iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
299 if (!(iov)) { 286 if (!iov) {
300 printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); 287 pr_err("Unable to allocate fd_do_readv iov[]\n");
301 return -ENOMEM; 288 return -ENOMEM;
302 } 289 }
303 290
304 for (i = 0; i < task->task_sg_num; i++) { 291 for (i = 0; i < task->task_sg_nents; i++) {
305 iov[i].iov_len = sg[i].length; 292 iov[i].iov_len = sg[i].length;
306 iov[i].iov_base = sg_virt(&sg[i]); 293 iov[i].iov_base = sg_virt(&sg[i]);
307 } 294 }
308 295
309 old_fs = get_fs(); 296 old_fs = get_fs();
310 set_fs(get_ds()); 297 set_fs(get_ds());
311 ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos); 298 ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
312 set_fs(old_fs); 299 set_fs(old_fs);
313 300
314 kfree(iov); 301 kfree(iov);
@@ -319,14 +306,14 @@ static int fd_do_readv(struct se_task *task)
319 */ 306 */
320 if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { 307 if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
321 if (ret < 0 || ret != task->task_size) { 308 if (ret < 0 || ret != task->task_size) {
322 printk(KERN_ERR "vfs_readv() returned %d," 309 pr_err("vfs_readv() returned %d,"
323 " expecting %d for S_ISBLK\n", ret, 310 " expecting %d for S_ISBLK\n", ret,
324 (int)task->task_size); 311 (int)task->task_size);
325 return (ret < 0 ? ret : -EINVAL); 312 return (ret < 0 ? ret : -EINVAL);
326 } 313 }
327 } else { 314 } else {
328 if (ret < 0) { 315 if (ret < 0) {
329 printk(KERN_ERR "vfs_readv() returned %d for non" 316 pr_err("vfs_readv() returned %d for non"
330 " S_ISBLK\n", ret); 317 " S_ISBLK\n", ret);
331 return ret; 318 return ret;
332 } 319 }
@@ -338,7 +325,8 @@ static int fd_do_readv(struct se_task *task)
338static int fd_do_writev(struct se_task *task) 325static int fd_do_writev(struct se_task *task)
339{ 326{
340 struct fd_request *req = FILE_REQ(task); 327 struct fd_request *req = FILE_REQ(task);
341 struct file *fd = req->fd_dev->fd_file; 328 struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
329 struct file *fd = dev->fd_file;
342 struct scatterlist *sg = task->task_sg; 330 struct scatterlist *sg = task->task_sg;
343 struct iovec *iov; 331 struct iovec *iov;
344 mm_segment_t old_fs; 332 mm_segment_t old_fs;
@@ -346,26 +334,26 @@ static int fd_do_writev(struct se_task *task)
346 task->se_dev->se_sub_dev->se_dev_attrib.block_size); 334 task->se_dev->se_sub_dev->se_dev_attrib.block_size);
347 int ret, i = 0; 335 int ret, i = 0;
348 336
349 iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); 337 iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
350 if (!(iov)) { 338 if (!iov) {
351 printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); 339 pr_err("Unable to allocate fd_do_writev iov[]\n");
352 return -ENOMEM; 340 return -ENOMEM;
353 } 341 }
354 342
355 for (i = 0; i < task->task_sg_num; i++) { 343 for (i = 0; i < task->task_sg_nents; i++) {
356 iov[i].iov_len = sg[i].length; 344 iov[i].iov_len = sg[i].length;
357 iov[i].iov_base = sg_virt(&sg[i]); 345 iov[i].iov_base = sg_virt(&sg[i]);
358 } 346 }
359 347
360 old_fs = get_fs(); 348 old_fs = get_fs();
361 set_fs(get_ds()); 349 set_fs(get_ds());
362 ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos); 350 ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
363 set_fs(old_fs); 351 set_fs(old_fs);
364 352
365 kfree(iov); 353 kfree(iov);
366 354
367 if (ret < 0 || ret != task->task_size) { 355 if (ret < 0 || ret != task->task_size) {
368 printk(KERN_ERR "vfs_writev() returned %d\n", ret); 356 pr_err("vfs_writev() returned %d\n", ret);
369 return (ret < 0 ? ret : -EINVAL); 357 return (ret < 0 ? ret : -EINVAL);
370 } 358 }
371 359
@@ -404,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
404 392
405 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 393 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
406 if (ret != 0) 394 if (ret != 0)
407 printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); 395 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
408 396
409 if (!immed) 397 if (!immed)
410 transport_complete_sync_cache(cmd, ret == 0); 398 transport_complete_sync_cache(cmd, ret == 0);
@@ -449,12 +437,12 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
449 loff_t end = start + task->task_size; 437 loff_t end = start + task->task_size;
450 int ret; 438 int ret;
451 439
452 DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", 440 pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
453 task->task_lba, task->task_size); 441 task->task_lba, task->task_size);
454 442
455 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 443 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
456 if (ret != 0) 444 if (ret != 0)
457 printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); 445 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
458} 446}
459 447
460static int fd_do_task(struct se_task *task) 448static int fd_do_task(struct se_task *task)
@@ -548,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params(
548 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, 536 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
549 "%s", arg_p); 537 "%s", arg_p);
550 kfree(arg_p); 538 kfree(arg_p);
551 printk(KERN_INFO "FILEIO: Referencing Path: %s\n", 539 pr_debug("FILEIO: Referencing Path: %s\n",
552 fd_dev->fd_dev_name); 540 fd_dev->fd_dev_name);
553 fd_dev->fbd_flags |= FBDF_HAS_PATH; 541 fd_dev->fbd_flags |= FBDF_HAS_PATH;
554 break; 542 break;
@@ -561,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params(
561 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); 549 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
562 kfree(arg_p); 550 kfree(arg_p);
563 if (ret < 0) { 551 if (ret < 0) {
564 printk(KERN_ERR "strict_strtoull() failed for" 552 pr_err("strict_strtoull() failed for"
565 " fd_dev_size=\n"); 553 " fd_dev_size=\n");
566 goto out; 554 goto out;
567 } 555 }
568 printk(KERN_INFO "FILEIO: Referencing Size: %llu" 556 pr_debug("FILEIO: Referencing Size: %llu"
569 " bytes\n", fd_dev->fd_dev_size); 557 " bytes\n", fd_dev->fd_dev_size);
570 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 558 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
571 break; 559 break;
572 case Opt_fd_buffered_io: 560 case Opt_fd_buffered_io:
573 match_int(args, &arg); 561 match_int(args, &arg);
574 if (arg != 1) { 562 if (arg != 1) {
575 printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg); 563 pr_err("bogus fd_buffered_io=%d value\n", arg);
576 ret = -EINVAL; 564 ret = -EINVAL;
577 goto out; 565 goto out;
578 } 566 }
579 567
580 printk(KERN_INFO "FILEIO: Using buffered I/O" 568 pr_debug("FILEIO: Using buffered I/O"
581 " operations for struct fd_dev\n"); 569 " operations for struct fd_dev\n");
582 570
583 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; 571 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
@@ -597,7 +585,7 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
597 struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; 585 struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
598 586
599 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 587 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
600 printk(KERN_ERR "Missing fd_dev_name=\n"); 588 pr_err("Missing fd_dev_name=\n");
601 return -EINVAL; 589 return -EINVAL;
602 } 590 }
603 591
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 6386d3f60631..daebd710b893 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -16,8 +16,6 @@ struct fd_request {
16 struct se_task fd_task; 16 struct se_task fd_task;
17 /* SCSI CDB from iSCSI Command PDU */ 17 /* SCSI CDB from iSCSI Command PDU */
18 unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; 18 unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
19 /* FILEIO device */
20 struct fd_dev *fd_dev;
21} ____cacheline_aligned; 19} ____cacheline_aligned;
22 20
23#define FBDF_HAS_PATH 0x01 21#define FBDF_HAS_PATH 0x01
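
The fileio hunks above carry the two functional pieces of this round: fd_alloc_task() now takes the CDB rather than the se_cmd, so the backend can no longer cache the device pointer at allocation time, and the fd_dev field is accordingly removed from struct fd_request. The device is instead resolved from the task when I/O is issued, and the iovec loops index by the renamed task_sg_nents. A condensed sketch of the new shape, with error paths trimmed and the fd_request/fd_dev/se_task types assumed from the target headers; fd_file_for_task() is an illustrative helper, not a function from the patch:

#include <linux/slab.h>

static struct se_task *fd_alloc_task(unsigned char *cdb)
{
	struct fd_request *fd_req;

	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
	if (!fd_req)
		return NULL;

	/* No fd_req->fd_dev assignment any more; the device is looked
	 * up per I/O, see below.
	 */
	return &fd_req->fd_task;
}

static struct file *fd_file_for_task(struct se_task *task)
{
	/* The patch spells this req->fd_task.se_dev->dev_ptr, which is
	 * the same se_task reached through the containing fd_request.
	 */
	struct fd_dev *dev = task->se_dev->dev_ptr;

	return dev->fd_file;
}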
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index bd9da25bc945..0639b975d6f5 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -58,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
58 58
59 mutex_lock(&subsystem_mutex); 59 mutex_lock(&subsystem_mutex);
60 list_for_each_entry(s, &subsystem_list, sub_api_list) { 60 list_for_each_entry(s, &subsystem_list, sub_api_list) {
61 if (!(strcmp(s->name, sub_api->name))) { 61 if (!strcmp(s->name, sub_api->name)) {
62 printk(KERN_ERR "%p is already registered with" 62 pr_err("%p is already registered with"
63 " duplicate name %s, unable to process" 63 " duplicate name %s, unable to process"
64 " request\n", s, s->name); 64 " request\n", s, s->name);
65 mutex_unlock(&subsystem_mutex); 65 mutex_unlock(&subsystem_mutex);
@@ -69,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
69 list_add_tail(&sub_api->sub_api_list, &subsystem_list); 69 list_add_tail(&sub_api->sub_api_list, &subsystem_list);
70 mutex_unlock(&subsystem_mutex); 70 mutex_unlock(&subsystem_mutex);
71 71
72 printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" 72 pr_debug("TCM: Registered subsystem plugin: %s struct module:"
73 " %p\n", sub_api->name, sub_api->owner); 73 " %p\n", sub_api->name, sub_api->owner);
74 return 0; 74 return 0;
75} 75}
@@ -109,7 +109,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
109 109
110 hba = kzalloc(sizeof(*hba), GFP_KERNEL); 110 hba = kzalloc(sizeof(*hba), GFP_KERNEL);
111 if (!hba) { 111 if (!hba) {
112 printk(KERN_ERR "Unable to allocate struct se_hba\n"); 112 pr_err("Unable to allocate struct se_hba\n");
113 return ERR_PTR(-ENOMEM); 113 return ERR_PTR(-ENOMEM);
114 } 114 }
115 115
@@ -135,7 +135,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
135 list_add_tail(&hba->hba_node, &hba_list); 135 list_add_tail(&hba->hba_node, &hba_list);
136 spin_unlock(&hba_lock); 136 spin_unlock(&hba_lock);
137 137
138 printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" 138 pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
139 " Core\n", hba->hba_id); 139 " Core\n", hba->hba_id);
140 140
141 return hba; 141 return hba;
@@ -161,7 +161,7 @@ core_delete_hba(struct se_hba *hba)
161 list_del(&hba->hba_node); 161 list_del(&hba->hba_node);
162 spin_unlock(&hba_lock); 162 spin_unlock(&hba_lock);
163 163
164 printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" 164 pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
165 " Core\n", hba->hba_id); 165 " Core\n", hba->hba_id);
166 166
167 if (hba->transport->owner) 167 if (hba->transport->owner)
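The printk(KERN_INFO) -> pr_debug conversions in this file are the heart of the series: debug chatter compiles away (or, with CONFIG_DYNAMIC_DEBUG, becomes toggleable per call site at runtime), while pr_err keeps error paths unconditional. A rough userspace approximation of the compile-time half of that behavior — my_pr_debug/my_pr_err are stand-ins for illustration, not the kernel macros:

    #include <stdio.h>

    /* Sketch: debug output is compiled out unless a build switch is
     * set; errors always print. (The kernel's real mechanism is
     * pr_debug plus CONFIG_DYNAMIC_DEBUG.) */
    #ifdef DEBUG
    #define my_pr_debug(fmt, ...) fprintf(stderr, "tcm: " fmt, ##__VA_ARGS__)
    #else
    #define my_pr_debug(fmt, ...) do { } while (0)
    #endif

    #define my_pr_err(fmt, ...) fprintf(stderr, "tcm: " fmt, ##__VA_ARGS__)

    int main(void)
    {
    	my_pr_debug("Registered subsystem plugin: %s\n", "iblock");
    	my_pr_err("%p is already registered\n", (void *)0);
    	return 0;
    }

Building with -DDEBUG enables the first call; the error path always prints, which is why KERN_ERR sites become pr_err rather than pr_debug.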
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 164b72106b88..251fc66a8212 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -47,12 +47,6 @@
47 47
48#include "target_core_iblock.h" 48#include "target_core_iblock.h"
49 49
50#if 0
51#define DEBUG_IBLOCK(x...) printk(x)
52#else
53#define DEBUG_IBLOCK(x...)
54#endif
55
56static struct se_subsystem_api iblock_template; 50static struct se_subsystem_api iblock_template;
57 51
58static void iblock_bio_done(struct bio *, int); 52static void iblock_bio_done(struct bio *, int);
@@ -66,8 +60,8 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
66 struct iblock_hba *ib_host; 60 struct iblock_hba *ib_host;
67 61
68 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); 62 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
69 if (!(ib_host)) { 63 if (!ib_host) {
70 printk(KERN_ERR "Unable to allocate memory for" 64 pr_err("Unable to allocate memory for"
71 " struct iblock_hba\n"); 65 " struct iblock_hba\n");
72 return -ENOMEM; 66 return -ENOMEM;
73 } 67 }
@@ -76,11 +70,11 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
76 70
77 hba->hba_ptr = ib_host; 71 hba->hba_ptr = ib_host;
78 72
79 printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" 73 pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
80 " Generic Target Core Stack %s\n", hba->hba_id, 74 " Generic Target Core Stack %s\n", hba->hba_id,
81 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); 75 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
82 76
83 printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", 77 pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
84 hba->hba_id, ib_host->iblock_host_id); 78 hba->hba_id, ib_host->iblock_host_id);
85 79
86 return 0; 80 return 0;
@@ -90,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba)
90{ 84{
91 struct iblock_hba *ib_host = hba->hba_ptr; 85 struct iblock_hba *ib_host = hba->hba_ptr;
92 86
93 printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" 87 pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
94 " Target Core\n", hba->hba_id, ib_host->iblock_host_id); 88 " Target Core\n", hba->hba_id, ib_host->iblock_host_id);
95 89
96 kfree(ib_host); 90 kfree(ib_host);
@@ -103,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
103 struct iblock_hba *ib_host = hba->hba_ptr; 97 struct iblock_hba *ib_host = hba->hba_ptr;
104 98
105 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); 99 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
106 if (!(ib_dev)) { 100 if (!ib_dev) {
107 printk(KERN_ERR "Unable to allocate struct iblock_dev\n"); 101 pr_err("Unable to allocate struct iblock_dev\n");
108 return NULL; 102 return NULL;
109 } 103 }
110 ib_dev->ibd_host = ib_host; 104 ib_dev->ibd_host = ib_host;
111 105
112 printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name); 106 pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);
113 107
114 return ib_dev; 108 return ib_dev;
115} 109}
@@ -128,8 +122,8 @@ static struct se_device *iblock_create_virtdevice(
128 u32 dev_flags = 0; 122 u32 dev_flags = 0;
129 int ret = -EINVAL; 123 int ret = -EINVAL;
130 124
131 if (!(ib_dev)) { 125 if (!ib_dev) {
132 printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); 126 pr_err("Unable to locate struct iblock_dev parameter\n");
133 return ERR_PTR(ret); 127 return ERR_PTR(ret);
134 } 128 }
135 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 129 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@@ -137,16 +131,16 @@ static struct se_device *iblock_create_virtdevice(
137 * These settings need to be made tunable.. 131 * These settings need to be made tunable..
138 */ 132 */
139 ib_dev->ibd_bio_set = bioset_create(32, 64); 133 ib_dev->ibd_bio_set = bioset_create(32, 64);
140 if (!(ib_dev->ibd_bio_set)) { 134 if (!ib_dev->ibd_bio_set) {
141 printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); 135 pr_err("IBLOCK: Unable to create bioset()\n");
142 return ERR_PTR(-ENOMEM); 136 return ERR_PTR(-ENOMEM);
143 } 137 }
144 printk(KERN_INFO "IBLOCK: Created bio_set()\n"); 138 pr_debug("IBLOCK: Created bio_set()\n");
145 /* 139 /*
146 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path 140 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
147 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. 141 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
148 */ 142 */
149 printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n", 143 pr_debug("IBLOCK: Claiming struct block_device: %s\n",
150 ib_dev->ibd_udev_path); 144 ib_dev->ibd_udev_path);
151 145
152 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 146 bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
@@ -172,7 +166,7 @@ static struct se_device *iblock_create_virtdevice(
172 dev = transport_add_device_to_core_hba(hba, 166 dev = transport_add_device_to_core_hba(hba,
173 &iblock_template, se_dev, dev_flags, ib_dev, 167 &iblock_template, se_dev, dev_flags, ib_dev,
174 &dev_limits, "IBLOCK", IBLOCK_VERSION); 168 &dev_limits, "IBLOCK", IBLOCK_VERSION);
175 if (!(dev)) 169 if (!dev)
176 goto failed; 170 goto failed;
177 171
178 /* 172 /*
@@ -192,7 +186,7 @@ static struct se_device *iblock_create_virtdevice(
192 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 186 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
193 q->limits.discard_alignment; 187 q->limits.discard_alignment;
194 188
195 printk(KERN_INFO "IBLOCK: BLOCK Discard support available," 189 pr_debug("IBLOCK: BLOCK Discard support available,"
196 " disabled by default\n"); 190 " disabled by default\n");
197 } 191 }
198 192
@@ -227,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
227} 221}
228 222
229static struct se_task * 223static struct se_task *
230iblock_alloc_task(struct se_cmd *cmd) 224iblock_alloc_task(unsigned char *cdb)
231{ 225{
232 struct iblock_req *ib_req; 226 struct iblock_req *ib_req;
233 227
234 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 228 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
235 if (!(ib_req)) { 229 if (!ib_req) {
236 printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n"); 230 pr_err("Unable to allocate memory for struct iblock_req\n");
237 return NULL; 231 return NULL;
238 } 232 }
239 233
240 ib_req->ib_dev = cmd->se_dev->dev_ptr;
241 atomic_set(&ib_req->ib_bio_cnt, 0); 234 atomic_set(&ib_req->ib_bio_cnt, 0);
242 return &ib_req->ib_task; 235 return &ib_req->ib_task;
243} 236}
@@ -345,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
345 */ 338 */
346 ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); 339 ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
347 if (ret != 0) { 340 if (ret != 0) {
348 printk(KERN_ERR "IBLOCK: blkdev_issue_flush() failed: %d " 341 pr_err("IBLOCK: blkdev_issue_flush() failed: %d "
349 "error_sector: %llu\n", ret, 342 "error_sector: %llu\n", ret,
350 (unsigned long long)error_sector); 343 (unsigned long long)error_sector);
351 } 344 }
@@ -409,8 +402,9 @@ static int iblock_do_task(struct se_task *task)
409 while (bio) { 402 while (bio) {
410 nbio = bio->bi_next; 403 nbio = bio->bi_next;
411 bio->bi_next = NULL; 404 bio->bi_next = NULL;
412 DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p" 405 pr_debug("Calling submit_bio() task: %p bio: %p"
413 " bio->bi_sector: %llu\n", task, bio, bio->bi_sector); 406 " bio->bi_sector: %llu\n", task, bio,
407 (unsigned long long)bio->bi_sector);
414 408
415 submit_bio(rw, bio); 409 submit_bio(rw, bio);
416 bio = nbio; 410 bio = nbio;
@@ -480,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
480 switch (token) { 474 switch (token) {
481 case Opt_udev_path: 475 case Opt_udev_path:
482 if (ib_dev->ibd_bd) { 476 if (ib_dev->ibd_bd) {
483 printk(KERN_ERR "Unable to set udev_path= while" 477 pr_err("Unable to set udev_path= while"
484 " ib_dev->ibd_bd exists\n"); 478 " ib_dev->ibd_bd exists\n");
485 ret = -EEXIST; 479 ret = -EEXIST;
486 goto out; 480 goto out;
@@ -493,7 +487,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
493 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, 487 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
494 "%s", arg_p); 488 "%s", arg_p);
495 kfree(arg_p); 489 kfree(arg_p);
496 printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", 490 pr_debug("IBLOCK: Referencing UDEV path: %s\n",
497 ib_dev->ibd_udev_path); 491 ib_dev->ibd_udev_path);
498 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; 492 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
499 break; 493 break;
@@ -516,7 +510,7 @@ static ssize_t iblock_check_configfs_dev_params(
516 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 510 struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
517 511
518 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { 512 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
519 printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); 513 pr_err("Missing udev_path= parameters for IBLOCK\n");
520 return -EINVAL; 514 return -EINVAL;
521 } 515 }
522 516
@@ -574,15 +568,15 @@ static struct bio *iblock_get_bio(
574 struct bio *bio; 568 struct bio *bio;
575 569
576 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 570 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
577 if (!(bio)) { 571 if (!bio) {
578 printk(KERN_ERR "Unable to allocate memory for bio\n"); 572 pr_err("Unable to allocate memory for bio\n");
579 *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 573 *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
580 return NULL; 574 return NULL;
581 } 575 }
582 576
583 DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:" 577 pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
584 " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set); 578 " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
585 DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); 579 pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
586 580
587 bio->bi_bdev = ib_dev->ibd_bd; 581 bio->bi_bdev = ib_dev->ibd_bd;
588 bio->bi_private = task; 582 bio->bi_private = task;
@@ -591,8 +585,8 @@ static struct bio *iblock_get_bio(
591 bio->bi_sector = lba; 585 bio->bi_sector = lba;
592 atomic_inc(&ib_req->ib_bio_cnt); 586 atomic_inc(&ib_req->ib_bio_cnt);
593 587
594 DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector); 588 pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
595 DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n", 589 pr_debug("Set ib_req->ib_bio_cnt: %d\n",
596 atomic_read(&ib_req->ib_bio_cnt)); 590 atomic_read(&ib_req->ib_bio_cnt));
597 return bio; 591 return bio;
598} 592}
@@ -606,7 +600,7 @@ static int iblock_map_task_SG(struct se_task *task)
606 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; 600 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
607 struct scatterlist *sg; 601 struct scatterlist *sg;
608 int ret = 0; 602 int ret = 0;
609 u32 i, sg_num = task->task_sg_num; 603 u32 i, sg_num = task->task_sg_nents;
610 sector_t block_lba; 604 sector_t block_lba;
611 /* 605 /*
612 * Do starting conversion up from non 512-byte blocksize with 606 * Do starting conversion up from non 512-byte blocksize with
@@ -621,13 +615,13 @@ static int iblock_map_task_SG(struct se_task *task)
621 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) 615 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
622 block_lba = task->task_lba; 616 block_lba = task->task_lba;
623 else { 617 else {
624 printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" 618 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
625 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 619 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
626 return PYX_TRANSPORT_LU_COMM_FAILURE; 620 return PYX_TRANSPORT_LU_COMM_FAILURE;
627 } 621 }
628 622
629 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); 623 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
630 if (!(bio)) 624 if (!bio)
631 return ret; 625 return ret;
632 626
633 ib_req->ib_bio = bio; 627 ib_req->ib_bio = bio;
@@ -636,41 +630,41 @@ static int iblock_map_task_SG(struct se_task *task)
636 * Use fs/bio.c:bio_add_page() to set up the bio_vec maplist 630 * Use fs/bio.c:bio_add_page() to set up the bio_vec maplist
637 * from task->task_sg -> struct scatterlist memory. 631 * from task->task_sg -> struct scatterlist memory.
638 */ 632 */
639 for_each_sg(task->task_sg, sg, task->task_sg_num, i) { 633 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
640 DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" 634 pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
641 " %p len: %u offset: %u\n", task, bio, sg_page(sg), 635 " %p len: %u offset: %u\n", task, bio, sg_page(sg),
642 sg->length, sg->offset); 636 sg->length, sg->offset);
643again: 637again:
644 ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); 638 ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
645 if (ret != sg->length) { 639 if (ret != sg->length) {
646 640
647 DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n", 641 pr_debug("*** Set bio->bi_sector: %llu\n",
648 bio->bi_sector); 642 (unsigned long long)bio->bi_sector);
649 DEBUG_IBLOCK("** task->task_size: %u\n", 643 pr_debug("** task->task_size: %u\n",
650 task->task_size); 644 task->task_size);
651 DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n", 645 pr_debug("*** bio->bi_max_vecs: %u\n",
652 bio->bi_max_vecs); 646 bio->bi_max_vecs);
653 DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n", 647 pr_debug("*** bio->bi_vcnt: %u\n",
654 bio->bi_vcnt); 648 bio->bi_vcnt);
655 649
656 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, 650 bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
657 block_lba, sg_num); 651 block_lba, sg_num);
658 if (!(bio)) 652 if (!bio)
659 goto fail; 653 goto fail;
660 654
661 tbio = tbio->bi_next = bio; 655 tbio = tbio->bi_next = bio;
662 DEBUG_IBLOCK("-----------------> Added +1 bio: %p to" 656 pr_debug("-----------------> Added +1 bio: %p to"
663 " list, Going to again\n", bio); 657 " list, Going to again\n", bio);
664 goto again; 658 goto again;
665 } 659 }
666 /* Always in 512 byte units for Linux/Block */ 660 /* Always in 512 byte units for Linux/Block */
667 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 661 block_lba += sg->length >> IBLOCK_LBA_SHIFT;
668 sg_num--; 662 sg_num--;
669 DEBUG_IBLOCK("task: %p bio_add_page() passed!, decremented 663 pr_debug("task: %p bio_add_page() passed!, decremented
670 " sg_num to %u\n", task, sg_num); 664 " sg_num to %u\n", task, sg_num);
671 DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba" 665 pr_debug("task: %p bio_add_page() passed!, increased lba"
672 " to %llu\n", task, block_lba); 666 " to %llu\n", task, (unsigned long long)block_lba);
673 DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:" 667 pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
674 " %u\n", task, bio->bi_vcnt); 668 " %u\n", task, bio->bi_vcnt);
675 } 669 }
676 670
@@ -716,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err)
716 /* 710 /*
717 * Set -EIO if !BIO_UPTODATE and the passed is still err=0 711 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
718 */ 712 */
719 if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err)) 713 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
720 err = -EIO; 714 err = -EIO;
721 715
722 if (err != 0) { 716 if (err != 0) {
723 printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p," 717 pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
724 " err: %d\n", bio, err); 718 " err: %d\n", bio, err);
725 /* 719 /*
726 * Bump the ib_bio_err_cnt and release bio. 720 * Bump the ib_bio_err_cnt and release bio.
@@ -731,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err)
731 /* 725 /*
732 * Wait to complete the task until the last bio has completed. 726 * Wait to complete the task until the last bio has completed.
733 */ 727 */
734 if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) 728 if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
735 return; 729 return;
736 730
737 ibr->ib_bio = NULL; 731 ibr->ib_bio = NULL;
738 transport_complete_task(task, 0); 732 transport_complete_task(task, 0);
739 return; 733 return;
740 } 734 }
741 DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", 735 pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
742 task, bio, task->task_lba, bio->bi_sector, err); 736 task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
743 /* 737 /*
744 * bio_put() will call iblock_bio_destructor() to release the bio back 738 * bio_put() will call iblock_bio_destructor() to release the bio back
745 * to ibr->ib_bio_set. 739 * to ibr->ib_bio_set.
@@ -748,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err)
748 /* 742 /*
749 * Wait to complete the task until the last bio has completed. 743 * Wait to complete the task until the last bio has completed.
750 */ 744 */
751 if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) 745 if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
752 return; 746 return;
753 /* 747 /*
754 * Return GOOD status for task if zero ib_bio_err_cnt exists. 748 * Return GOOD status for task if zero ib_bio_err_cnt exists.
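iblock_bio_done() above completes the task only when the last outstanding bio finishes, using atomic_dec_and_test() on ib_bio_cnt plus an error counter for final status. A self-contained C11 sketch of that completion pattern, with stdatomic standing in for the kernel atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Sketch: every completion decrements a shared counter; only the
     * caller that drops it to zero reports the aggregate status. */
    struct task_state {
    	atomic_int bio_cnt;
    	atomic_int bio_err_cnt;
    };

    static void bio_done(struct task_state *t, int err)
    {
    	if (err)
    		atomic_fetch_add(&t->bio_err_cnt, 1);

    	/* fetch_sub returns the old value; old == 1 means we hit 0 */
    	if (atomic_fetch_sub(&t->bio_cnt, 1) != 1)
    		return;

    	printf("task complete, status %s\n",
    	       atomic_load(&t->bio_err_cnt) ? "BAD" : "GOOD");
    }

    int main(void)
    {
    	struct task_state t = { 3, 0 };

    	bio_done(&t, 0);
    	bio_done(&t, -5); /* one bio fails with -EIO */
    	bio_done(&t, 0);  /* last completion reports status */
    	return 0;
    }

Counting down rather than tracking individual bios is what lets the submit path fire-and-forget an arbitrary-length bio chain.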
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 2aa1d27a49b9..a121cd1b6575 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -12,7 +12,6 @@ struct iblock_req {
12 atomic_t ib_bio_cnt; 12 atomic_t ib_bio_cnt;
13 atomic_t ib_bio_err_cnt; 13 atomic_t ib_bio_err_cnt;
14 struct bio *ib_bio; 14 struct bio *ib_bio;
15 struct iblock_dev *ib_dev;
16} ____cacheline_aligned; 15} ____cacheline_aligned;
17 16
18#define IBDF_HAS_UDEV_PATH 0x01 17#define IBDF_HAS_UDEV_PATH 0x01
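iblock_map_task_SG() above keeps calling bio_add_page() and, whenever the current bio refuses a segment, allocates another bio from the bioset, links it after the tail, and retries the same segment — the `goto again` loop. A hypothetical userspace sketch of that fill-and-chain loop, with a tiny fixed capacity standing in for bi_max_vecs:

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch: add each segment to the current container; when full,
     * chain a fresh one after the tail and retry the same segment. */
    #define MAX_VECS 2

    struct bio_sketch {
    	int vcnt;
    	int segs[MAX_VECS];
    	struct bio_sketch *next;
    };

    static int add_seg(struct bio_sketch *b, int seg)
    {
    	if (b->vcnt == MAX_VECS)
    		return 0;            /* full: caller chains a new bio */
    	b->segs[b->vcnt++] = seg;
    	return 1;
    }

    int main(void)
    {
    	int sgl[] = { 10, 20, 30, 40, 50 };
    	struct bio_sketch *head = calloc(1, sizeof(*head));
    	struct bio_sketch *tail = head;

    	for (size_t i = 0; i < sizeof(sgl) / sizeof(sgl[0]); ) {
    		if (add_seg(tail, sgl[i])) {
    			i++;
    			continue;    /* segment accepted */
    		}
    		tail->next = calloc(1, sizeof(*tail)); /* chain, retry */
    		tail = tail->next;
    	}

    	for (struct bio_sketch *b = head; b; b = b->next)
    		printf("bio with %d segs\n", b->vcnt);

    	while (head) {
    		struct bio_sketch *n = head->next;
    		free(head);
    		head = n;
    	}
    	return 0;
    }

The retry-on-the-same-segment shape is why the kernel loop decrements sg_num only after a successful bio_add_page().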
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3342843f8619..1c1b849cd4fb 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -62,7 +62,7 @@ int core_pr_dump_initiator_port(
62 char *buf, 62 char *buf,
63 u32 size) 63 u32 size)
64{ 64{
65 if (!(pr_reg->isid_present_at_reg)) 65 if (!pr_reg->isid_present_at_reg)
66 return 0; 66 return 0;
67 67
68 snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); 68 snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
@@ -95,7 +95,7 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
95 struct se_session *sess = cmd->se_sess; 95 struct se_session *sess = cmd->se_sess;
96 int ret; 96 int ret;
97 97
98 if (!(sess)) 98 if (!sess)
99 return 0; 99 return 0;
100 100
101 spin_lock(&dev->dev_reservation_lock); 101 spin_lock(&dev->dev_reservation_lock);
@@ -123,7 +123,7 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd)
123 struct se_session *sess = cmd->se_sess; 123 struct se_session *sess = cmd->se_sess;
124 struct se_portal_group *tpg = sess->se_tpg; 124 struct se_portal_group *tpg = sess->se_tpg;
125 125
126 if (!(sess) || !(tpg)) 126 if (!sess || !tpg)
127 return 0; 127 return 0;
128 128
129 spin_lock(&dev->dev_reservation_lock); 129 spin_lock(&dev->dev_reservation_lock);
@@ -142,7 +142,7 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd)
142 dev->dev_res_bin_isid = 0; 142 dev->dev_res_bin_isid = 0;
143 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; 143 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
144 } 144 }
145 printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" 145 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
146 " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 146 " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
147 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, 147 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
148 sess->se_node_acl->initiatorname); 148 sess->se_node_acl->initiatorname);
@@ -159,7 +159,7 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
159 159
160 if ((cmd->t_task_cdb[1] & 0x01) && 160 if ((cmd->t_task_cdb[1] & 0x01) &&
161 (cmd->t_task_cdb[1] & 0x02)) { 161 (cmd->t_task_cdb[1] & 0x02)) {
162 printk(KERN_ERR "LongIO and Obsolete Bits set, returning" 162 pr_err("LongIO and Obsolete Bits set, returning"
163 " ILLEGAL_REQUEST\n"); 163 " ILLEGAL_REQUEST\n");
164 return PYX_TRANSPORT_ILLEGAL_REQUEST; 164 return PYX_TRANSPORT_ILLEGAL_REQUEST;
165 } 165 }
@@ -167,18 +167,18 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
167 * This is currently the case for target_core_mod passthrough struct se_cmd 167 * This is currently the case for target_core_mod passthrough struct se_cmd
168 * ops 168 * ops
169 */ 169 */
170 if (!(sess) || !(tpg)) 170 if (!sess || !tpg)
171 return 0; 171 return 0;
172 172
173 spin_lock(&dev->dev_reservation_lock); 173 spin_lock(&dev->dev_reservation_lock);
174 if (dev->dev_reserved_node_acl && 174 if (dev->dev_reserved_node_acl &&
175 (dev->dev_reserved_node_acl != sess->se_node_acl)) { 175 (dev->dev_reserved_node_acl != sess->se_node_acl)) {
176 printk(KERN_ERR "SCSI-2 RESERVATION CONFLICT for %s fabric\n", 176 pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
177 tpg->se_tpg_tfo->get_fabric_name()); 177 tpg->se_tpg_tfo->get_fabric_name());
178 printk(KERN_ERR "Original reserver LUN: %u %s\n", 178 pr_err("Original reserver LUN: %u %s\n",
179 cmd->se_lun->unpacked_lun, 179 cmd->se_lun->unpacked_lun,
180 dev->dev_reserved_node_acl->initiatorname); 180 dev->dev_reserved_node_acl->initiatorname);
181 printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" 181 pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
182 " from %s\n", cmd->se_lun->unpacked_lun, 182 " from %s\n", cmd->se_lun->unpacked_lun,
183 cmd->se_deve->mapped_lun, 183 cmd->se_deve->mapped_lun,
184 sess->se_node_acl->initiatorname); 184 sess->se_node_acl->initiatorname);
@@ -192,7 +192,7 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
192 dev->dev_res_bin_isid = sess->sess_bin_isid; 192 dev->dev_res_bin_isid = sess->sess_bin_isid;
193 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; 193 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
194 } 194 }
195 printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" 195 pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
196 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 196 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
197 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, 197 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
198 sess->se_node_acl->initiatorname); 198 sess->se_node_acl->initiatorname);
@@ -220,10 +220,10 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
220 int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); 220 int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
221 int conflict = 0; 221 int conflict = 0;
222 222
223 if (!(se_sess)) 223 if (!se_sess)
224 return 0; 224 return 0;
225 225
226 if (!(crh)) 226 if (!crh)
227 goto after_crh; 227 goto after_crh;
228 228
229 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 229 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -280,7 +280,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
280 } 280 }
281 281
282 if (conflict) { 282 if (conflict) {
283 printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE" 283 pr_err("Received legacy SPC-2 RESERVE/RELEASE"
284 " while active SPC-3 registrations exist," 284 " while active SPC-3 registrations exist,"
285 " returning RESERVATION_CONFLICT\n"); 285 " returning RESERVATION_CONFLICT\n");
286 return PYX_TRANSPORT_RESERVATION_CONFLICT; 286 return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -412,7 +412,7 @@ static int core_scsi3_pr_seq_non_holder(
412 ret = (registered_nexus) ? 0 : 1; 412 ret = (registered_nexus) ? 0 : 1;
413 break; 413 break;
414 default: 414 default:
415 printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" 415 pr_err("Unknown PERSISTENT_RESERVE_OUT service"
416 " action: 0x%02x\n", cdb[1] & 0x1f); 416 " action: 0x%02x\n", cdb[1] & 0x1f);
417 return -EINVAL; 417 return -EINVAL;
418 } 418 }
@@ -459,7 +459,7 @@ static int core_scsi3_pr_seq_non_holder(
459 ret = 0; /* Allowed */ 459 ret = 0; /* Allowed */
460 break; 460 break;
461 default: 461 default:
462 printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", 462 pr_err("Unknown MI Service Action: 0x%02x\n",
463 (cdb[1] & 0x1f)); 463 (cdb[1] & 0x1f));
464 return -EINVAL; 464 return -EINVAL;
465 } 465 }
@@ -481,9 +481,9 @@ static int core_scsi3_pr_seq_non_holder(
481 * Case where the CDB is explicitly allowed in the above switch 481 * Case where the CDB is explicitly allowed in the above switch
482 * statement. 482 * statement.
483 */ 483 */
484 if (!(ret) && !(other_cdb)) { 484 if (!ret && !other_cdb) {
485#if 0 485#if 0
486 printk(KERN_INFO "Allowing explicit CDB: 0x%02x for %s" 486 pr_debug("Allowing explicit CDB: 0x%02x for %s"
487 " reservation holder\n", cdb[0], 487 " reservation holder\n", cdb[0],
488 core_scsi3_pr_dump_type(pr_reg_type)); 488 core_scsi3_pr_dump_type(pr_reg_type));
489#endif 489#endif
@@ -498,7 +498,7 @@ static int core_scsi3_pr_seq_non_holder(
498 /* 498 /*
499 * Conflict for write exclusive 499 * Conflict for write exclusive
500 */ 500 */
501 printk(KERN_INFO "%s Conflict for unregistered nexus" 501 pr_debug("%s Conflict for unregistered nexus"
502 " %s CDB: 0x%02x to %s reservation\n", 502 " %s CDB: 0x%02x to %s reservation\n",
503 transport_dump_cmd_direction(cmd), 503 transport_dump_cmd_direction(cmd),
504 se_sess->se_node_acl->initiatorname, cdb[0], 504 se_sess->se_node_acl->initiatorname, cdb[0],
@@ -515,8 +515,8 @@ static int core_scsi3_pr_seq_non_holder(
515 * nexuses to issue CDBs. 515 * nexuses to issue CDBs.
516 */ 516 */
517#if 0 517#if 0
518 if (!(registered_nexus)) { 518 if (!registered_nexus) {
519 printk(KERN_INFO "Allowing implicit CDB: 0x%02x" 519 pr_debug("Allowing implicit CDB: 0x%02x"
520 " for %s reservation on unregistered" 520 " for %s reservation on unregistered"
521 " nexus\n", cdb[0], 521 " nexus\n", cdb[0],
522 core_scsi3_pr_dump_type(pr_reg_type)); 522 core_scsi3_pr_dump_type(pr_reg_type));
@@ -531,14 +531,14 @@ static int core_scsi3_pr_seq_non_holder(
531 * allow commands from registered nexuses. 531 * allow commands from registered nexuses.
532 */ 532 */
533#if 0 533#if 0
534 printk(KERN_INFO "Allowing implicit CDB: 0x%02x for %s" 534 pr_debug("Allowing implicit CDB: 0x%02x for %s"
535 " reservation\n", cdb[0], 535 " reservation\n", cdb[0],
536 core_scsi3_pr_dump_type(pr_reg_type)); 536 core_scsi3_pr_dump_type(pr_reg_type));
537#endif 537#endif
538 return 0; 538 return 0;
539 } 539 }
540 } 540 }
541 printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%02x" 541 pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%02x"
542 " for %s reservation\n", transport_dump_cmd_direction(cmd), 542 " for %s reservation\n", transport_dump_cmd_direction(cmd),
543 (registered_nexus) ? "" : "un", 543 (registered_nexus) ? "" : "un",
544 se_sess->se_node_acl->initiatorname, cdb[0], 544 se_sess->se_node_acl->initiatorname, cdb[0],
@@ -575,7 +575,7 @@ static int core_scsi3_pr_reservation_check(
575 struct se_session *sess = cmd->se_sess; 575 struct se_session *sess = cmd->se_sess;
576 int ret; 576 int ret;
577 577
578 if (!(sess)) 578 if (!sess)
579 return 0; 579 return 0;
580 /* 580 /*
581 * A legacy SPC-2 reservation is being held. 581 * A legacy SPC-2 reservation is being held.
@@ -584,7 +584,7 @@ static int core_scsi3_pr_reservation_check(
584 return core_scsi2_reservation_check(cmd, pr_reg_type); 584 return core_scsi2_reservation_check(cmd, pr_reg_type);
585 585
586 spin_lock(&dev->dev_reservation_lock); 586 spin_lock(&dev->dev_reservation_lock);
587 if (!(dev->dev_pr_res_holder)) { 587 if (!dev->dev_pr_res_holder) {
588 spin_unlock(&dev->dev_reservation_lock); 588 spin_unlock(&dev->dev_reservation_lock);
589 return 0; 589 return 0;
590 } 590 }
@@ -594,7 +594,7 @@ static int core_scsi3_pr_reservation_check(
594 spin_unlock(&dev->dev_reservation_lock); 594 spin_unlock(&dev->dev_reservation_lock);
595 return -EINVAL; 595 return -EINVAL;
596 } 596 }
597 if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { 597 if (!dev->dev_pr_res_holder->isid_present_at_reg) {
598 spin_unlock(&dev->dev_reservation_lock); 598 spin_unlock(&dev->dev_reservation_lock);
599 return 0; 599 return 0;
600 } 600 }
@@ -624,15 +624,15 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
624 struct t10_pr_registration *pr_reg; 624 struct t10_pr_registration *pr_reg;
625 625
626 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); 626 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
627 if (!(pr_reg)) { 627 if (!pr_reg) {
628 printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); 628 pr_err("Unable to allocate struct t10_pr_registration\n");
629 return NULL; 629 return NULL;
630 } 630 }
631 631
632 pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, 632 pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
633 GFP_ATOMIC); 633 GFP_ATOMIC);
634 if (!(pr_reg->pr_aptpl_buf)) { 634 if (!pr_reg->pr_aptpl_buf) {
635 printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); 635 pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
636 kmem_cache_free(t10_pr_reg_cache, pr_reg); 636 kmem_cache_free(t10_pr_reg_cache, pr_reg);
637 return NULL; 637 return NULL;
638 } 638 }
@@ -692,12 +692,12 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
692 */ 692 */
693 pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, 693 pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
694 sa_res_key, all_tg_pt, aptpl); 694 sa_res_key, all_tg_pt, aptpl);
695 if (!(pr_reg)) 695 if (!pr_reg)
696 return NULL; 696 return NULL;
697 /* 697 /*
698 * Return pointer to pr_reg for ALL_TG_PT=0 698 * Return pointer to pr_reg for ALL_TG_PT=0
699 */ 699 */
700 if (!(all_tg_pt)) 700 if (!all_tg_pt)
701 return pr_reg; 701 return pr_reg;
702 /* 702 /*
703 * Create list of matching SCSI Initiator Port registrations 703 * Create list of matching SCSI Initiator Port registrations
@@ -717,7 +717,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
717 * that have not been made explicit via a ConfigFS 717 * that have not been made explicit via a ConfigFS
718 * MappedLUN group for the SCSI Initiator Node ACL. 718 * MappedLUN group for the SCSI Initiator Node ACL.
719 */ 719 */
720 if (!(deve_tmp->se_lun_acl)) 720 if (!deve_tmp->se_lun_acl)
721 continue; 721 continue;
722 722
723 nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; 723 nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
@@ -751,7 +751,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
751 */ 751 */
752 ret = core_scsi3_lunacl_depend_item(deve_tmp); 752 ret = core_scsi3_lunacl_depend_item(deve_tmp);
753 if (ret < 0) { 753 if (ret < 0) {
754 printk(KERN_ERR "core_scsi3_lunacl_depend" 754 pr_err("core_scsi3_lunacl_depend"
755 "_item() failed\n"); 755 "_item() failed\n");
756 atomic_dec(&port->sep_tg_pt_ref_cnt); 756 atomic_dec(&port->sep_tg_pt_ref_cnt);
757 smp_mb__after_atomic_dec(); 757 smp_mb__after_atomic_dec();
@@ -769,7 +769,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
769 pr_reg_atp = __core_scsi3_do_alloc_registration(dev, 769 pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
770 nacl_tmp, deve_tmp, NULL, 770 nacl_tmp, deve_tmp, NULL,
771 sa_res_key, all_tg_pt, aptpl); 771 sa_res_key, all_tg_pt, aptpl);
772 if (!(pr_reg_atp)) { 772 if (!pr_reg_atp) {
773 atomic_dec(&port->sep_tg_pt_ref_cnt); 773 atomic_dec(&port->sep_tg_pt_ref_cnt);
774 smp_mb__after_atomic_dec(); 774 smp_mb__after_atomic_dec();
775 atomic_dec(&deve_tmp->pr_ref_count); 775 atomic_dec(&deve_tmp->pr_ref_count);
@@ -817,14 +817,14 @@ int core_scsi3_alloc_aptpl_registration(
817{ 817{
818 struct t10_pr_registration *pr_reg; 818 struct t10_pr_registration *pr_reg;
819 819
820 if (!(i_port) || !(t_port) || !(sa_res_key)) { 820 if (!i_port || !t_port || !sa_res_key) {
821 printk(KERN_ERR "Illegal parameters for APTPL registration\n"); 821 pr_err("Illegal parameters for APTPL registration\n");
822 return -EINVAL; 822 return -EINVAL;
823 } 823 }
824 824
825 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); 825 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
826 if (!(pr_reg)) { 826 if (!pr_reg) {
827 printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); 827 pr_err("Unable to allocate struct t10_pr_registration\n");
828 return -ENOMEM; 828 return -ENOMEM;
829 } 829 }
830 pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); 830 pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
@@ -869,7 +869,7 @@ int core_scsi3_alloc_aptpl_registration(
869 pr_reg->pr_res_holder = res_holder; 869 pr_reg->pr_res_holder = res_holder;
870 870
871 list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); 871 list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
872 printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from" 872 pr_debug("SPC-3 PR APTPL Successfully added registration%s from"
873 " metadata\n", (res_holder) ? "+reservation" : ""); 873 " metadata\n", (res_holder) ? "+reservation" : "");
874 return 0; 874 return 0;
875} 875}
@@ -891,12 +891,12 @@ static void core_scsi3_aptpl_reserve(
891 dev->dev_pr_res_holder = pr_reg; 891 dev->dev_pr_res_holder = pr_reg;
892 spin_unlock(&dev->dev_reservation_lock); 892 spin_unlock(&dev->dev_reservation_lock);
893 893
894 printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" 894 pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
895 " new reservation holder TYPE: %s ALL_TG_PT: %d\n", 895 " new reservation holder TYPE: %s ALL_TG_PT: %d\n",
896 tpg->se_tpg_tfo->get_fabric_name(), 896 tpg->se_tpg_tfo->get_fabric_name(),
897 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 897 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
898 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 898 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
899 printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", 899 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
900 tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, 900 tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
901 (prf_isid) ? &i_buf[0] : ""); 901 (prf_isid) ? &i_buf[0] : "");
902} 902}
@@ -936,7 +936,7 @@ static int __core_scsi3_check_aptpl_registration(
936 spin_lock(&pr_tmpl->aptpl_reg_lock); 936 spin_lock(&pr_tmpl->aptpl_reg_lock);
937 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, 937 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
938 pr_reg_aptpl_list) { 938 pr_reg_aptpl_list) {
939 if (!(strcmp(pr_reg->pr_iport, i_port)) && 939 if (!strcmp(pr_reg->pr_iport, i_port) &&
940 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && 940 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
941 !(strcmp(pr_reg->pr_tport, t_port)) && 941 !(strcmp(pr_reg->pr_tport, t_port)) &&
942 (pr_reg->pr_reg_tpgt == tpgt) && 942 (pr_reg->pr_reg_tpgt == tpgt) &&
@@ -1006,19 +1006,19 @@ static void __core_scsi3_dump_registration(
1006 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 1006 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1007 PR_REG_ISID_ID_LEN); 1007 PR_REG_ISID_ID_LEN);
1008 1008
1009 printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator" 1009 pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
1010 " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? 1010 " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
1011 "_AND_MOVE" : (register_type == 1) ? 1011 "_AND_MOVE" : (register_type == 1) ?
1012 "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, 1012 "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
1013 (prf_isid) ? i_buf : ""); 1013 (prf_isid) ? i_buf : "");
1014 printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", 1014 pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
1015 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), 1015 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
1016 tfo->tpg_get_tag(se_tpg)); 1016 tfo->tpg_get_tag(se_tpg));
1017 printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1017 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
1018 " Port(s)\n", tfo->get_fabric_name(), 1018 " Port(s)\n", tfo->get_fabric_name(),
1019 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", 1019 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
1020 dev->transport->name); 1020 dev->transport->name);
1021 printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1021 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
1022 " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), 1022 " 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
1023 pr_reg->pr_res_key, pr_reg->pr_res_generation, 1023 pr_reg->pr_res_key, pr_reg->pr_res_generation,
1024 pr_reg->pr_reg_aptpl); 1024 pr_reg->pr_reg_aptpl);
@@ -1062,7 +1062,7 @@ static void __core_scsi3_add_registration(
1062 /* 1062 /*
1063 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. 1063 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
1064 */ 1064 */
1065 if (!(pr_reg->pr_reg_all_tg_pt) || (register_move)) 1065 if (!pr_reg->pr_reg_all_tg_pt || register_move)
1066 return; 1066 return;
1067 /* 1067 /*
1068 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 1068 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
@@ -1106,7 +1106,7 @@ static int core_scsi3_alloc_registration(
1106 1106
1107 pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, 1107 pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
1108 sa_res_key, all_tg_pt, aptpl); 1108 sa_res_key, all_tg_pt, aptpl);
1109 if (!(pr_reg)) 1109 if (!pr_reg)
1110 return -EPERM; 1110 return -EPERM;
1111 1111
1112 __core_scsi3_add_registration(dev, nacl, pr_reg, 1112 __core_scsi3_add_registration(dev, nacl, pr_reg,
@@ -1137,7 +1137,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1137 * If this registration does NOT contain a fabric provided 1137 * If this registration does NOT contain a fabric provided
1138 * ISID, then we have found a match. 1138 * ISID, then we have found a match.
1139 */ 1139 */
1140 if (!(pr_reg->isid_present_at_reg)) { 1140 if (!pr_reg->isid_present_at_reg) {
1141 /* 1141 /*
1142 * Determine if this SCSI device server requires that 1142 * Determine if this SCSI device server requires that
1143 * SCSI Initiator TransportID w/ ISIDs is enforced 1143 * SCSI Initiator TransportID w/ ISIDs is enforced
@@ -1157,7 +1157,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1157 * SCSI Initiator Port TransportIDs, then we expect a valid 1157 * SCSI Initiator Port TransportIDs, then we expect a valid
1158 * matching ISID to be provided by the local SCSI Initiator Port. 1158 * matching ISID to be provided by the local SCSI Initiator Port.
1159 */ 1159 */
1160 if (!(isid)) 1160 if (!isid)
1161 continue; 1161 continue;
1162 if (strcmp(isid, pr_reg->pr_reg_isid)) 1162 if (strcmp(isid, pr_reg->pr_reg_isid))
1163 continue; 1163 continue;
@@ -1206,7 +1206,7 @@ static int core_scsi3_check_implict_release(
1206 1206
1207 spin_lock(&dev->dev_reservation_lock); 1207 spin_lock(&dev->dev_reservation_lock);
1208 pr_res_holder = dev->dev_pr_res_holder; 1208 pr_res_holder = dev->dev_pr_res_holder;
1209 if (!(pr_res_holder)) { 1209 if (!pr_res_holder) {
1210 spin_unlock(&dev->dev_reservation_lock); 1210 spin_unlock(&dev->dev_reservation_lock);
1211 return ret; 1211 return ret;
1212 } 1212 }
@@ -1236,7 +1236,7 @@ static int core_scsi3_check_implict_release(
1236 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, 1236 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
1237 pr_reg->pr_reg_nacl->initiatorname)) && 1237 pr_reg->pr_reg_nacl->initiatorname)) &&
1238 (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { 1238 (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
1239 printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1" 1239 pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1"
1240 " UNREGISTER while existing reservation with matching" 1240 " UNREGISTER while existing reservation with matching"
1241 " key 0x%016Lx is present from another SCSI Initiator" 1241 " key 0x%016Lx is present from another SCSI Initiator"
1242 " Port\n", pr_reg->pr_res_key); 1242 " Port\n", pr_reg->pr_res_key);
@@ -1283,25 +1283,25 @@ static void __core_scsi3_free_registration(
1283 */ 1283 */
1284 while (atomic_read(&pr_reg->pr_res_holders) != 0) { 1284 while (atomic_read(&pr_reg->pr_res_holders) != 0) {
1285 spin_unlock(&pr_tmpl->registration_lock); 1285 spin_unlock(&pr_tmpl->registration_lock);
1286 printk("SPC-3 PR [%s] waiting for pr_res_holders\n", 1286 pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
1287 tfo->get_fabric_name()); 1287 tfo->get_fabric_name());
1288 cpu_relax(); 1288 cpu_relax();
1289 spin_lock(&pr_tmpl->registration_lock); 1289 spin_lock(&pr_tmpl->registration_lock);
1290 } 1290 }
1291 1291
1292 printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator" 1292 pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
1293 " Node: %s%s\n", tfo->get_fabric_name(), 1293 " Node: %s%s\n", tfo->get_fabric_name(),
1294 pr_reg->pr_reg_nacl->initiatorname, 1294 pr_reg->pr_reg_nacl->initiatorname,
1295 (prf_isid) ? &i_buf[0] : ""); 1295 (prf_isid) ? &i_buf[0] : "");
1296 printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1296 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
1297 " Port(s)\n", tfo->get_fabric_name(), 1297 " Port(s)\n", tfo->get_fabric_name(),
1298 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", 1298 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
1299 dev->transport->name); 1299 dev->transport->name);
1300 printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1300 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
1301 " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, 1301 " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
1302 pr_reg->pr_res_generation); 1302 pr_reg->pr_res_generation);
1303 1303
1304 if (!(preempt_and_abort_list)) { 1304 if (!preempt_and_abort_list) {
1305 pr_reg->pr_reg_deve = NULL; 1305 pr_reg->pr_reg_deve = NULL;
1306 pr_reg->pr_reg_nacl = NULL; 1306 pr_reg->pr_reg_nacl = NULL;
1307 kfree(pr_reg->pr_aptpl_buf); 1307 kfree(pr_reg->pr_aptpl_buf);
@@ -1430,7 +1430,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
1430 /* 1430 /*
1431 * For nacl->dynamic_node_acl=1 1431 * For nacl->dynamic_node_acl=1
1432 */ 1432 */
1433 if (!(lun_acl)) 1433 if (!lun_acl)
1434 return 0; 1434 return 0;
1435 1435
1436 nacl = lun_acl->se_lun_nacl; 1436 nacl = lun_acl->se_lun_nacl;
@@ -1448,7 +1448,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1448 /* 1448 /*
1449 * For nacl->dynamic_node_acl=1 1449 * For nacl->dynamic_node_acl=1
1450 */ 1450 */
1451 if (!(lun_acl)) { 1451 if (!lun_acl) {
1452 atomic_dec(&se_deve->pr_ref_count); 1452 atomic_dec(&se_deve->pr_ref_count);
1453 smp_mb__after_atomic_dec(); 1453 smp_mb__after_atomic_dec();
1454 return; 1454 return;
@@ -1500,8 +1500,8 @@ static int core_scsi3_decode_spec_i_port(
1500 * processing in the loop of tid_dest_list below. 1500 * processing in the loop of tid_dest_list below.
1501 */ 1501 */
1502 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); 1502 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
1503 if (!(tidh_new)) { 1503 if (!tidh_new) {
1504 printk(KERN_ERR "Unable to allocate tidh_new\n"); 1504 pr_err("Unable to allocate tidh_new\n");
1505 return PYX_TRANSPORT_LU_COMM_FAILURE; 1505 return PYX_TRANSPORT_LU_COMM_FAILURE;
1506 } 1506 }
1507 INIT_LIST_HEAD(&tidh_new->dest_list); 1507 INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1512,7 +1512,7 @@ static int core_scsi3_decode_spec_i_port(
1512 local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, 1512 local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
1513 se_sess->se_node_acl, local_se_deve, l_isid, 1513 se_sess->se_node_acl, local_se_deve, l_isid,
1514 sa_res_key, all_tg_pt, aptpl); 1514 sa_res_key, all_tg_pt, aptpl);
1515 if (!(local_pr_reg)) { 1515 if (!local_pr_reg) {
1516 kfree(tidh_new); 1516 kfree(tidh_new);
1517 return PYX_TRANSPORT_LU_COMM_FAILURE; 1517 return PYX_TRANSPORT_LU_COMM_FAILURE;
1518 } 1518 }
@@ -1537,7 +1537,7 @@ static int core_scsi3_decode_spec_i_port(
1537 tpdl |= buf[27] & 0xff; 1537 tpdl |= buf[27] & 0xff;
1538 1538
1539 if ((tpdl + 28) != cmd->data_length) { 1539 if ((tpdl + 28) != cmd->data_length) {
1540 printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header" 1540 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
1541 " does not equal CDB data_length: %u\n", tpdl, 1541 " does not equal CDB data_length: %u\n", tpdl,
1542 cmd->data_length); 1542 cmd->data_length);
1543 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1543 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -1557,13 +1557,13 @@ static int core_scsi3_decode_spec_i_port(
1557 spin_lock(&dev->se_port_lock); 1557 spin_lock(&dev->se_port_lock);
1558 list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { 1558 list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
1559 tmp_tpg = tmp_port->sep_tpg; 1559 tmp_tpg = tmp_port->sep_tpg;
1560 if (!(tmp_tpg)) 1560 if (!tmp_tpg)
1561 continue; 1561 continue;
1562 tmp_tf_ops = tmp_tpg->se_tpg_tfo; 1562 tmp_tf_ops = tmp_tpg->se_tpg_tfo;
1563 if (!(tmp_tf_ops)) 1563 if (!tmp_tf_ops)
1564 continue; 1564 continue;
1565 if (!(tmp_tf_ops->get_fabric_proto_ident) || 1565 if (!tmp_tf_ops->get_fabric_proto_ident ||
1566 !(tmp_tf_ops->tpg_parse_pr_out_transport_id)) 1566 !tmp_tf_ops->tpg_parse_pr_out_transport_id)
1567 continue; 1567 continue;
1568 /* 1568 /*
1569 * Look for the matching proto_ident provided by 1569 * Look for the matching proto_ident provided by
@@ -1577,7 +1577,7 @@ static int core_scsi3_decode_spec_i_port(
1577 i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( 1577 i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
1578 tmp_tpg, (const char *)ptr, &tid_len, 1578 tmp_tpg, (const char *)ptr, &tid_len,
1579 &iport_ptr); 1579 &iport_ptr);
1580 if (!(i_str)) 1580 if (!i_str)
1581 continue; 1581 continue;
1582 1582
1583 atomic_inc(&tmp_tpg->tpg_pr_ref_count); 1583 atomic_inc(&tmp_tpg->tpg_pr_ref_count);
@@ -1586,7 +1586,7 @@ static int core_scsi3_decode_spec_i_port(
1586 1586
1587 ret = core_scsi3_tpg_depend_item(tmp_tpg); 1587 ret = core_scsi3_tpg_depend_item(tmp_tpg);
1588 if (ret != 0) { 1588 if (ret != 0) {
1589 printk(KERN_ERR "core_scsi3_tpg_depend_item() failed" 1589 pr_err("core_scsi3_tpg_depend_item() failed"
1590 " for tmp_tpg\n"); 1590 " for tmp_tpg\n");
1591 atomic_dec(&tmp_tpg->tpg_pr_ref_count); 1591 atomic_dec(&tmp_tpg->tpg_pr_ref_count);
1592 smp_mb__after_atomic_dec(); 1592 smp_mb__after_atomic_dec();
@@ -1607,7 +1607,7 @@ static int core_scsi3_decode_spec_i_port(
1607 } 1607 }
1608 spin_unlock_bh(&tmp_tpg->acl_node_lock); 1608 spin_unlock_bh(&tmp_tpg->acl_node_lock);
1609 1609
1610 if (!(dest_node_acl)) { 1610 if (!dest_node_acl) {
1611 core_scsi3_tpg_undepend_item(tmp_tpg); 1611 core_scsi3_tpg_undepend_item(tmp_tpg);
1612 spin_lock(&dev->se_port_lock); 1612 spin_lock(&dev->se_port_lock);
1613 continue; 1613 continue;
@@ -1615,7 +1615,7 @@ static int core_scsi3_decode_spec_i_port(
1615 1615
1616 ret = core_scsi3_nodeacl_depend_item(dest_node_acl); 1616 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
1617 if (ret != 0) { 1617 if (ret != 0) {
1618 printk(KERN_ERR "configfs_depend_item() failed" 1618 pr_err("configfs_depend_item() failed"
1619 " for dest_node_acl->acl_group\n"); 1619 " for dest_node_acl->acl_group\n");
1620 atomic_dec(&dest_node_acl->acl_pr_ref_count); 1620 atomic_dec(&dest_node_acl->acl_pr_ref_count);
1621 smp_mb__after_atomic_dec(); 1621 smp_mb__after_atomic_dec();
@@ -1625,7 +1625,7 @@ static int core_scsi3_decode_spec_i_port(
1625 } 1625 }
1626 1626
1627 dest_tpg = tmp_tpg; 1627 dest_tpg = tmp_tpg;
1628 printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" 1628 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
1629 " %s Port RTPI: %hu\n", 1629 " %s Port RTPI: %hu\n",
1630 dest_tpg->se_tpg_tfo->get_fabric_name(), 1630 dest_tpg->se_tpg_tfo->get_fabric_name(),
1631 dest_node_acl->initiatorname, dest_rtpi); 1631 dest_node_acl->initiatorname, dest_rtpi);
@@ -1635,20 +1635,20 @@ static int core_scsi3_decode_spec_i_port(
1635 } 1635 }
1636 spin_unlock(&dev->se_port_lock); 1636 spin_unlock(&dev->se_port_lock);
1637 1637
1638 if (!(dest_tpg)) { 1638 if (!dest_tpg) {
1639 printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate" 1639 pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
1640 " dest_tpg\n"); 1640 " dest_tpg\n");
1641 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1641 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1642 goto out; 1642 goto out;
1643 } 1643 }
1644#if 0 1644#if 0
1645 printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" 1645 pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
1646 " tid_len: %d for %s + %s\n", 1646 " tid_len: %d for %s + %s\n",
1647 dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, 1647 dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
1648 tpdl, tid_len, i_str, iport_ptr); 1648 tpdl, tid_len, i_str, iport_ptr);
1649#endif 1649#endif
1650 if (tid_len > tpdl) { 1650 if (tid_len > tpdl) {
1651 printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:" 1651 pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
1652 " %u for Transport ID: %s\n", tid_len, ptr); 1652 " %u for Transport ID: %s\n", tid_len, ptr);
1653 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1653 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1654 core_scsi3_tpg_undepend_item(dest_tpg); 1654 core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1662,8 +1662,8 @@ static int core_scsi3_decode_spec_i_port(
1662 */ 1662 */
1663 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, 1663 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
1664 dest_rtpi); 1664 dest_rtpi);
1665 if (!(dest_se_deve)) { 1665 if (!dest_se_deve) {
1666 printk(KERN_ERR "Unable to locate %s dest_se_deve" 1666 pr_err("Unable to locate %s dest_se_deve"
1667 " from destination RTPI: %hu\n", 1667 " from destination RTPI: %hu\n",
1668 dest_tpg->se_tpg_tfo->get_fabric_name(), 1668 dest_tpg->se_tpg_tfo->get_fabric_name(),
1669 dest_rtpi); 1669 dest_rtpi);
@@ -1676,7 +1676,7 @@ static int core_scsi3_decode_spec_i_port(
1676 1676
1677 ret = core_scsi3_lunacl_depend_item(dest_se_deve); 1677 ret = core_scsi3_lunacl_depend_item(dest_se_deve);
1678 if (ret < 0) { 1678 if (ret < 0) {
1679 printk(KERN_ERR "core_scsi3_lunacl_depend_item()" 1679 pr_err("core_scsi3_lunacl_depend_item()"
1680 " failed\n"); 1680 " failed\n");
1681 atomic_dec(&dest_se_deve->pr_ref_count); 1681 atomic_dec(&dest_se_deve->pr_ref_count);
1682 smp_mb__after_atomic_dec(); 1682 smp_mb__after_atomic_dec();
@@ -1686,7 +1686,7 @@ static int core_scsi3_decode_spec_i_port(
1686 goto out; 1686 goto out;
1687 } 1687 }
1688#if 0 1688#if 0
1689 printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s" 1689 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
1690 " dest_se_deve mapped_lun: %u\n", 1690 " dest_se_deve mapped_lun: %u\n",
1691 dest_tpg->se_tpg_tfo->get_fabric_name(), 1691 dest_tpg->se_tpg_tfo->get_fabric_name(),
1692 dest_node_acl->initiatorname, dest_se_deve->mapped_lun); 1692 dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
@@ -1714,8 +1714,8 @@ static int core_scsi3_decode_spec_i_port(
1714 */ 1714 */
1715 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), 1715 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
1716 GFP_KERNEL); 1716 GFP_KERNEL);
1717 if (!(tidh_new)) { 1717 if (!tidh_new) {
1718 printk(KERN_ERR "Unable to allocate tidh_new\n"); 1718 pr_err("Unable to allocate tidh_new\n");
1719 core_scsi3_lunacl_undepend_item(dest_se_deve); 1719 core_scsi3_lunacl_undepend_item(dest_se_deve);
1720 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1720 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1721 core_scsi3_tpg_undepend_item(dest_tpg); 1721 core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1746,7 +1746,7 @@ static int core_scsi3_decode_spec_i_port(
1746 dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, 1746 dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
1747 dest_node_acl, dest_se_deve, iport_ptr, 1747 dest_node_acl, dest_se_deve, iport_ptr,
1748 sa_res_key, all_tg_pt, aptpl); 1748 sa_res_key, all_tg_pt, aptpl);
1749 if (!(dest_pr_reg)) { 1749 if (!dest_pr_reg) {
1750 core_scsi3_lunacl_undepend_item(dest_se_deve); 1750 core_scsi3_lunacl_undepend_item(dest_se_deve);
1751 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1751 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1752 core_scsi3_tpg_undepend_item(dest_tpg); 1752 core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1795,7 +1795,7 @@ static int core_scsi3_decode_spec_i_port(
1795 __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, 1795 __core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
1796 dest_pr_reg, 0, 0); 1796 dest_pr_reg, 0, 0);
1797 1797
1798 printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" 1798 pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
1799 " registered Transport ID for Node: %s%s Mapped LUN:" 1799 " registered Transport ID for Node: %s%s Mapped LUN:"
1800 " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), 1800 " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
1801 dest_node_acl->initiatorname, (prf_isid) ? 1801 dest_node_acl->initiatorname, (prf_isid) ?
@@ -1923,7 +1923,7 @@ static int __core_scsi3_update_aptpl_buf(
1923 } 1923 }
1924 1924
1925 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1925 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1926 printk(KERN_ERR "Unable to update remaining" 1926 pr_err("Unable to update remaining"
1927 " APTPL metadata\n"); 1927 " APTPL metadata\n");
1928 spin_unlock(&su_dev->t10_pr.registration_lock); 1928 spin_unlock(&su_dev->t10_pr.registration_lock);
1929 return -EMSGSIZE; 1929 return -EMSGSIZE;
@@ -1941,7 +1941,7 @@ static int __core_scsi3_update_aptpl_buf(
1941 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); 1941 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
1942 1942
1943 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1943 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1944 printk(KERN_ERR "Unable to update remaining" 1944 pr_err("Unable to update remaining"
1945 " APTPL metadata\n"); 1945 " APTPL metadata\n");
1946 spin_unlock(&su_dev->t10_pr.registration_lock); 1946 spin_unlock(&su_dev->t10_pr.registration_lock);
1947 return -EMSGSIZE; 1947 return -EMSGSIZE;
@@ -1951,7 +1951,7 @@ static int __core_scsi3_update_aptpl_buf(
1951 } 1951 }
1952 spin_unlock(&su_dev->t10_pr.registration_lock); 1952 spin_unlock(&su_dev->t10_pr.registration_lock);
1953 1953
1954 if (!(reg_count)) 1954 if (!reg_count)
1955 len += sprintf(buf+len, "No Registrations or Reservations"); 1955 len += sprintf(buf+len, "No Registrations or Reservations");
1956 1956
1957 return 0; 1957 return 0;
@@ -1993,7 +1993,7 @@ static int __core_scsi3_write_aptpl_to_file(
1993 memset(path, 0, 512); 1993 memset(path, 0, 512);
1994 1994
1995 if (strlen(&wwn->unit_serial[0]) >= 512) { 1995 if (strlen(&wwn->unit_serial[0]) >= 512) {
1996 printk(KERN_ERR "WWN value for struct se_device does not fit" 1996 pr_err("WWN value for struct se_device does not fit"
1997 " into path buffer\n"); 1997 " into path buffer\n");
1998 return -EMSGSIZE; 1998 return -EMSGSIZE;
1999 } 1999 }
@@ -2001,13 +2001,13 @@ static int __core_scsi3_write_aptpl_to_file(
2001 snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); 2001 snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
2002 file = filp_open(path, flags, 0600); 2002 file = filp_open(path, flags, 0600);
2003 if (IS_ERR(file) || !file || !file->f_dentry) { 2003 if (IS_ERR(file) || !file || !file->f_dentry) {
2004 printk(KERN_ERR "filp_open(%s) for APTPL metadata" 2004 pr_err("filp_open(%s) for APTPL metadata"
2005 " failed\n", path); 2005 " failed\n", path);
2006 return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT); 2006 return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
2007 } 2007 }
2008 2008
2009 iov[0].iov_base = &buf[0]; 2009 iov[0].iov_base = &buf[0];
2010 if (!(pr_aptpl_buf_len)) 2010 if (!pr_aptpl_buf_len)
2011 iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ 2011 iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
2012 else 2012 else
2013 iov[0].iov_len = pr_aptpl_buf_len; 2013 iov[0].iov_len = pr_aptpl_buf_len;
@@ -2018,7 +2018,7 @@ static int __core_scsi3_write_aptpl_to_file(
2018 set_fs(old_fs); 2018 set_fs(old_fs);
2019 2019
2020 if (ret < 0) { 2020 if (ret < 0) {
2021 printk("Error writing APTPL metadata file: %s\n", path); 2021 pr_debug("Error writing APTPL metadata file: %s\n", path);
2022 filp_close(file, NULL); 2022 filp_close(file, NULL);
2023 return -EIO; 2023 return -EIO;
2024 } 2024 }
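__core_scsi3_write_aptpl_to_file() is built on the classic in-kernel file I/O idiom of that era. A condensed sketch of the pattern around the fragments visible above; the open flags and the exact vfs_writev() call are reconstructed and should be read as assumptions rather than a copy of the function:

    struct iovec iov[1];
    mm_segment_t old_fs;
    struct file *file;
    ssize_t ret;

    file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
    if (IS_ERR(file))
            return PTR_ERR(file);

    iov[0].iov_base = &buf[0];
    iov[0].iov_len = pr_aptpl_buf_len ? pr_aptpl_buf_len
                                      : strlen(&buf[0]) + 1; /* keep the NUL */

    old_fs = get_fs();
    set_fs(get_ds());       /* let a kernel buffer pass the iovec checks */
    ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
    set_fs(old_fs);

    filp_close(file, NULL);
    return (ret < 0) ? -EIO : 0;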
@@ -2038,7 +2038,7 @@ static int core_scsi3_update_and_write_aptpl(
2038 /* 2038 /*
2039 * Can be called with a NULL pointer from PROUT service action CLEAR 2039 * Can be called with a NULL pointer from PROUT service action CLEAR
2040 */ 2040 */
2041 if (!(in_buf)) { 2041 if (!in_buf) {
2042 memset(null_buf, 0, 64); 2042 memset(null_buf, 0, 64);
2043 buf = &null_buf[0]; 2043 buf = &null_buf[0];
2044 /* 2044 /*
@@ -2088,8 +2088,8 @@ static int core_scsi3_emulate_pro_register(
2088 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; 2088 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
2089 int pr_holder = 0, ret = 0, type; 2089 int pr_holder = 0, ret = 0, type;
2090 2090
2091 if (!(se_sess) || !(se_lun)) { 2091 if (!se_sess || !se_lun) {
2092 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2092 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2093 return PYX_TRANSPORT_LU_COMM_FAILURE; 2093 return PYX_TRANSPORT_LU_COMM_FAILURE;
2094 } 2094 }
2095 se_tpg = se_sess->se_tpg; 2095 se_tpg = se_sess->se_tpg;
@@ -2105,19 +2105,19 @@ static int core_scsi3_emulate_pro_register(
2105 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 2105 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
2106 */ 2106 */
2107 pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); 2107 pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
2108 if (!(pr_reg_e)) { 2108 if (!pr_reg_e) {
2109 if (res_key) { 2109 if (res_key) {
2110 printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero" 2110 pr_warn("SPC-3 PR: Reservation Key non-zero"
2111 " for SA REGISTER, returning CONFLICT\n"); 2111 " for SA REGISTER, returning CONFLICT\n");
2112 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2112 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2113 } 2113 }
2114 /* 2114 /*
2115 * Do nothing but return GOOD status. 2115 * Do nothing but return GOOD status.
2116 */ 2116 */
2117 if (!(sa_res_key)) 2117 if (!sa_res_key)
2118 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2118 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
2119 2119
2120 if (!(spec_i_pt)) { 2120 if (!spec_i_pt) {
2121 /* 2121 /*
2122 * Perform the Service Action REGISTER on the Initiator 2122 * Perform the Service Action REGISTER on the Initiator
2123 * Port Endpoint that the PRO was received from on the 2123 * Port Endpoint that the PRO was received from on the
@@ -2128,7 +2128,7 @@ static int core_scsi3_emulate_pro_register(
2128 sa_res_key, all_tg_pt, aptpl, 2128 sa_res_key, all_tg_pt, aptpl,
2129 ignore_key, 0); 2129 ignore_key, 0);
2130 if (ret != 0) { 2130 if (ret != 0) {
2131 printk(KERN_ERR "Unable to allocate" 2131 pr_err("Unable to allocate"
2132 " struct t10_pr_registration\n"); 2132 " struct t10_pr_registration\n");
2133 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2133 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
2134 } 2134 }
@@ -2149,10 +2149,10 @@ static int core_scsi3_emulate_pro_register(
2149 /* 2149 /*
2150 * Nothing left to do for the APTPL=0 case. 2150 * Nothing left to do for the APTPL=0 case.
2151 */ 2151 */
2152 if (!(aptpl)) { 2152 if (!aptpl) {
2153 pr_tmpl->pr_aptpl_active = 0; 2153 pr_tmpl->pr_aptpl_active = 0;
2154 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); 2154 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
2155 printk("SPC-3 PR: Set APTPL Bit Deactivated for" 2155 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
2156 " REGISTER\n"); 2156 " REGISTER\n");
2157 return 0; 2157 return 0;
2158 } 2158 }
@@ -2167,9 +2167,9 @@ static int core_scsi3_emulate_pro_register(
2167 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 2167 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
2168 &pr_reg->pr_aptpl_buf[0], 2168 &pr_reg->pr_aptpl_buf[0],
2169 pr_tmpl->pr_aptpl_buf_len); 2169 pr_tmpl->pr_aptpl_buf_len);
2170 if (!(ret)) { 2170 if (!ret) {
2171 pr_tmpl->pr_aptpl_active = 1; 2171 pr_tmpl->pr_aptpl_active = 1;
2172 printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); 2172 pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
2173 } 2173 }
2174 2174
2175 core_scsi3_put_pr_reg(pr_reg); 2175 core_scsi3_put_pr_reg(pr_reg);
@@ -2181,9 +2181,9 @@ static int core_scsi3_emulate_pro_register(
2181 pr_reg = pr_reg_e; 2181 pr_reg = pr_reg_e;
2182 type = pr_reg->pr_res_type; 2182 type = pr_reg->pr_res_type;
2183 2183
2184 if (!(ignore_key)) { 2184 if (!ignore_key) {
2185 if (res_key != pr_reg->pr_res_key) { 2185 if (res_key != pr_reg->pr_res_key) {
2186 printk(KERN_ERR "SPC-3 PR REGISTER: Received" 2186 pr_err("SPC-3 PR REGISTER: Received"
2187 " res_key: 0x%016Lx does not match" 2187 " res_key: 0x%016Lx does not match"
2188 " existing SA REGISTER res_key:" 2188 " existing SA REGISTER res_key:"
2189 " 0x%016Lx\n", res_key, 2189 " 0x%016Lx\n", res_key,
@@ -2193,7 +2193,7 @@ static int core_scsi3_emulate_pro_register(
2193 } 2193 }
2194 } 2194 }
2195 if (spec_i_pt) { 2195 if (spec_i_pt) {
2196 printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT" 2196 pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
2197 " set while sa_res_key=0\n"); 2197 " set while sa_res_key=0\n");
2198 core_scsi3_put_pr_reg(pr_reg); 2198 core_scsi3_put_pr_reg(pr_reg);
2199 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2199 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -2203,7 +2203,7 @@ static int core_scsi3_emulate_pro_register(
2203 * must also set ALL_TG_PT=1 in the incoming PROUT. 2203 * must also set ALL_TG_PT=1 in the incoming PROUT.
2204 */ 2204 */
2205 if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { 2205 if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
2206 printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1" 2206 pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
2207 " registration exists, but ALL_TG_PT=1 bit not" 2207 " registration exists, but ALL_TG_PT=1 bit not"
2208 " present in received PROUT\n"); 2208 " present in received PROUT\n");
2209 core_scsi3_put_pr_reg(pr_reg); 2209 core_scsi3_put_pr_reg(pr_reg);
@@ -2215,8 +2215,8 @@ static int core_scsi3_emulate_pro_register(
2215 if (aptpl) { 2215 if (aptpl) {
2216 pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, 2216 pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
2217 GFP_KERNEL); 2217 GFP_KERNEL);
2218 if (!(pr_aptpl_buf)) { 2218 if (!pr_aptpl_buf) {
2219 printk(KERN_ERR "Unable to allocate" 2219 pr_err("Unable to allocate"
2220 " pr_aptpl_buf\n"); 2220 " pr_aptpl_buf\n");
2221 core_scsi3_put_pr_reg(pr_reg); 2221 core_scsi3_put_pr_reg(pr_reg);
2222 return PYX_TRANSPORT_LU_COMM_FAILURE; 2222 return PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -2227,7 +2227,7 @@ static int core_scsi3_emulate_pro_register(
2227 * Nexus sa_res_key=1 Change Reservation Key for registered I_T 2227 * Nexus sa_res_key=1 Change Reservation Key for registered I_T
2228 * Nexus. 2228 * Nexus.
2229 */ 2229 */
2230 if (!(sa_res_key)) { 2230 if (!sa_res_key) {
2231 pr_holder = core_scsi3_check_implict_release( 2231 pr_holder = core_scsi3_check_implict_release(
2232 cmd->se_dev, pr_reg); 2232 cmd->se_dev, pr_reg);
2233 if (pr_holder < 0) { 2233 if (pr_holder < 0) {
@@ -2246,7 +2246,7 @@ static int core_scsi3_emulate_pro_register(
2246 &pr_tmpl->registration_list, 2246 &pr_tmpl->registration_list,
2247 pr_reg_list) { 2247 pr_reg_list) {
2248 2248
2249 if (!(pr_reg_p->pr_reg_all_tg_pt)) 2249 if (!pr_reg_p->pr_reg_all_tg_pt)
2250 continue; 2250 continue;
2251 2251
2252 if (pr_reg_p->pr_res_key != res_key) 2252 if (pr_reg_p->pr_res_key != res_key)
@@ -2295,10 +2295,10 @@ static int core_scsi3_emulate_pro_register(
2295 } 2295 }
2296 spin_unlock(&pr_tmpl->registration_lock); 2296 spin_unlock(&pr_tmpl->registration_lock);
2297 2297
2298 if (!(aptpl)) { 2298 if (!aptpl) {
2299 pr_tmpl->pr_aptpl_active = 0; 2299 pr_tmpl->pr_aptpl_active = 0;
2300 core_scsi3_update_and_write_aptpl(dev, NULL, 0); 2300 core_scsi3_update_and_write_aptpl(dev, NULL, 0);
2301 printk("SPC-3 PR: Set APTPL Bit Deactivated" 2301 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
2302 " for UNREGISTER\n"); 2302 " for UNREGISTER\n");
2303 return 0; 2303 return 0;
2304 } 2304 }
@@ -2306,9 +2306,9 @@ static int core_scsi3_emulate_pro_register(
2306 ret = core_scsi3_update_and_write_aptpl(dev, 2306 ret = core_scsi3_update_and_write_aptpl(dev,
2307 &pr_aptpl_buf[0], 2307 &pr_aptpl_buf[0],
2308 pr_tmpl->pr_aptpl_buf_len); 2308 pr_tmpl->pr_aptpl_buf_len);
2309 if (!(ret)) { 2309 if (!ret) {
2310 pr_tmpl->pr_aptpl_active = 1; 2310 pr_tmpl->pr_aptpl_active = 1;
2311 printk("SPC-3 PR: Set APTPL Bit Activated" 2311 pr_debug("SPC-3 PR: Set APTPL Bit Activated"
2312 " for UNREGISTER\n"); 2312 " for UNREGISTER\n");
2313 } 2313 }
2314 2314
@@ -2323,18 +2323,18 @@ static int core_scsi3_emulate_pro_register(
2323 pr_reg->pr_res_generation = core_scsi3_pr_generation( 2323 pr_reg->pr_res_generation = core_scsi3_pr_generation(
2324 cmd->se_dev); 2324 cmd->se_dev);
2325 pr_reg->pr_res_key = sa_res_key; 2325 pr_reg->pr_res_key = sa_res_key;
2326 printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" 2326 pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
2327 " Key for %s to: 0x%016Lx PRgeneration:" 2327 " Key for %s to: 0x%016Lx PRgeneration:"
2328 " 0x%08x\n", cmd->se_tfo->get_fabric_name(), 2328 " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
2329 (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", 2329 (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
2330 pr_reg->pr_reg_nacl->initiatorname, 2330 pr_reg->pr_reg_nacl->initiatorname,
2331 pr_reg->pr_res_key, pr_reg->pr_res_generation); 2331 pr_reg->pr_res_key, pr_reg->pr_res_generation);
2332 2332
2333 if (!(aptpl)) { 2333 if (!aptpl) {
2334 pr_tmpl->pr_aptpl_active = 0; 2334 pr_tmpl->pr_aptpl_active = 0;
2335 core_scsi3_update_and_write_aptpl(dev, NULL, 0); 2335 core_scsi3_update_and_write_aptpl(dev, NULL, 0);
2336 core_scsi3_put_pr_reg(pr_reg); 2336 core_scsi3_put_pr_reg(pr_reg);
2337 printk("SPC-3 PR: Set APTPL Bit Deactivated" 2337 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
2338 " for REGISTER\n"); 2338 " for REGISTER\n");
2339 return 0; 2339 return 0;
2340 } 2340 }
@@ -2342,9 +2342,9 @@ static int core_scsi3_emulate_pro_register(
2342 ret = core_scsi3_update_and_write_aptpl(dev, 2342 ret = core_scsi3_update_and_write_aptpl(dev,
2343 &pr_aptpl_buf[0], 2343 &pr_aptpl_buf[0],
2344 pr_tmpl->pr_aptpl_buf_len); 2344 pr_tmpl->pr_aptpl_buf_len);
2345 if (!(ret)) { 2345 if (!ret) {
2346 pr_tmpl->pr_aptpl_active = 1; 2346 pr_tmpl->pr_aptpl_active = 1;
2347 printk("SPC-3 PR: Set APTPL Bit Activated" 2347 pr_debug("SPC-3 PR: Set APTPL Bit Activated"
2348 " for REGISTER\n"); 2348 " for REGISTER\n");
2349 } 2349 }
2350 2350
@@ -2395,8 +2395,8 @@ static int core_scsi3_pro_reserve(
2395 2395
2396 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 2396 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2397 2397
2398 if (!(se_sess) || !(se_lun)) { 2398 if (!se_sess || !se_lun) {
2399 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2399 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2400 return PYX_TRANSPORT_LU_COMM_FAILURE; 2400 return PYX_TRANSPORT_LU_COMM_FAILURE;
2401 } 2401 }
2402 se_tpg = se_sess->se_tpg; 2402 se_tpg = se_sess->se_tpg;
@@ -2406,8 +2406,8 @@ static int core_scsi3_pro_reserve(
2406 */ 2406 */
2407 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 2407 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
2408 se_sess); 2408 se_sess);
2409 if (!(pr_reg)) { 2409 if (!pr_reg) {
2410 printk(KERN_ERR "SPC-3 PR: Unable to locate" 2410 pr_err("SPC-3 PR: Unable to locate"
2411 " PR_REGISTERED *pr_reg for RESERVE\n"); 2411 " PR_REGISTERED *pr_reg for RESERVE\n");
2412 return PYX_TRANSPORT_LU_COMM_FAILURE; 2412 return PYX_TRANSPORT_LU_COMM_FAILURE;
2413 } 2413 }
@@ -2421,7 +2421,7 @@ static int core_scsi3_pro_reserve(
2421 * registered with the logical unit for the I_T nexus; and 2421 * registered with the logical unit for the I_T nexus; and
2422 */ 2422 */
2423 if (res_key != pr_reg->pr_res_key) { 2423 if (res_key != pr_reg->pr_res_key) {
2424 printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx" 2424 pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
2425 " does not match existing SA REGISTER res_key:" 2425 " does not match existing SA REGISTER res_key:"
2426 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2426 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2427 core_scsi3_put_pr_reg(pr_reg); 2427 core_scsi3_put_pr_reg(pr_reg);
@@ -2438,7 +2438,7 @@ static int core_scsi3_pro_reserve(
2438 * and that persistent reservation has a scope of LU_SCOPE. 2438 * and that persistent reservation has a scope of LU_SCOPE.
2439 */ 2439 */
2440 if (scope != PR_SCOPE_LU_SCOPE) { 2440 if (scope != PR_SCOPE_LU_SCOPE) {
2441 printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2441 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
2442 core_scsi3_put_pr_reg(pr_reg); 2442 core_scsi3_put_pr_reg(pr_reg);
2443 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2443 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
2444 } 2444 }
@@ -2462,7 +2462,7 @@ static int core_scsi3_pro_reserve(
2462 */ 2462 */
2463 if (pr_res_holder != pr_reg) { 2463 if (pr_res_holder != pr_reg) {
2464 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2464 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2465 printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" 2465 pr_err("SPC-3 PR: Attempted RESERVE from"
2466 " [%s]: %s while reservation already held by" 2466 " [%s]: %s while reservation already held by"
2467 " [%s]: %s, returning RESERVATION_CONFLICT\n", 2467 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2468 cmd->se_tfo->get_fabric_name(), 2468 cmd->se_tfo->get_fabric_name(),
@@ -2484,7 +2484,7 @@ static int core_scsi3_pro_reserve(
2484 if ((pr_res_holder->pr_res_type != type) || 2484 if ((pr_res_holder->pr_res_type != type) ||
2485 (pr_res_holder->pr_res_scope != scope)) { 2485 (pr_res_holder->pr_res_scope != scope)) {
2486 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2486 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2487 printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" 2487 pr_err("SPC-3 PR: Attempted RESERVE from"
2488 " [%s]: %s trying to change TYPE and/or SCOPE," 2488 " [%s]: %s trying to change TYPE and/or SCOPE,"
2489 " while reservation already held by [%s]: %s," 2489 " while reservation already held by [%s]: %s,"
2490 " returning RESERVATION_CONFLICT\n", 2490 " returning RESERVATION_CONFLICT\n",
@@ -2522,11 +2522,11 @@ static int core_scsi3_pro_reserve(
2522 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 2522 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
2523 PR_REG_ISID_ID_LEN); 2523 PR_REG_ISID_ID_LEN);
2524 2524
2525 printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" 2525 pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
2526 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2526 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2527 cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), 2527 cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
2528 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2528 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2529 printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", 2529 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
2530 cmd->se_tfo->get_fabric_name(), 2530 cmd->se_tfo->get_fabric_name(),
2531 se_sess->se_node_acl->initiatorname, 2531 se_sess->se_node_acl->initiatorname,
2532 (prf_isid) ? &i_buf[0] : ""); 2532 (prf_isid) ? &i_buf[0] : "");
@@ -2536,8 +2536,8 @@ static int core_scsi3_pro_reserve(
2536 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 2536 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
2537 &pr_reg->pr_aptpl_buf[0], 2537 &pr_reg->pr_aptpl_buf[0],
2538 pr_tmpl->pr_aptpl_buf_len); 2538 pr_tmpl->pr_aptpl_buf_len);
2539 if (!(ret)) 2539 if (!ret)
2540 printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" 2540 pr_debug("SPC-3 PR: Updated APTPL metadata"
2541 " for RESERVE\n"); 2541 " for RESERVE\n");
2542 } 2542 }
2543 2543
@@ -2564,7 +2564,7 @@ static int core_scsi3_emulate_pro_reserve(
2564 ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key); 2564 ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
2565 break; 2565 break;
2566 default: 2566 default:
2567 printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:" 2567 pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
2568 " 0x%02x\n", type); 2568 " 0x%02x\n", type);
2569 return PYX_TRANSPORT_INVALID_CDB_FIELD; 2569 return PYX_TRANSPORT_INVALID_CDB_FIELD;
2570 } 2570 }
@@ -2593,12 +2593,12 @@ static void __core_scsi3_complete_pro_release(
2593 */ 2593 */
2594 dev->dev_pr_res_holder = NULL; 2594 dev->dev_pr_res_holder = NULL;
2595 2595
2596 printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2596 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2597 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2597 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2598 tfo->get_fabric_name(), (explict) ? "explict" : "implict", 2598 tfo->get_fabric_name(), (explict) ? "explict" : "implict",
2599 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 2599 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
2600 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2600 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2601 printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n", 2601 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
2602 tfo->get_fabric_name(), se_nacl->initiatorname, 2602 tfo->get_fabric_name(), se_nacl->initiatorname,
2603 (prf_isid) ? &i_buf[0] : ""); 2603 (prf_isid) ? &i_buf[0] : "");
2604 /* 2604 /*
@@ -2620,16 +2620,16 @@ static int core_scsi3_emulate_pro_release(
2620 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2620 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
2621 int ret, all_reg = 0; 2621 int ret, all_reg = 0;
2622 2622
2623 if (!(se_sess) || !(se_lun)) { 2623 if (!se_sess || !se_lun) {
2624 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2624 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2625 return PYX_TRANSPORT_LU_COMM_FAILURE; 2625 return PYX_TRANSPORT_LU_COMM_FAILURE;
2626 } 2626 }
2627 /* 2627 /*
2628 * Locate the existing *pr_reg via struct se_node_acl pointers 2628 * Locate the existing *pr_reg via struct se_node_acl pointers
2629 */ 2629 */
2630 pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); 2630 pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
2631 if (!(pr_reg)) { 2631 if (!pr_reg) {
2632 printk(KERN_ERR "SPC-3 PR: Unable to locate" 2632 pr_err("SPC-3 PR: Unable to locate"
2633 " PR_REGISTERED *pr_reg for RELEASE\n"); 2633 " PR_REGISTERED *pr_reg for RELEASE\n");
2634 return PYX_TRANSPORT_LU_COMM_FAILURE; 2634 return PYX_TRANSPORT_LU_COMM_FAILURE;
2635 } 2635 }
@@ -2647,7 +2647,7 @@ static int core_scsi3_emulate_pro_release(
2647 */ 2647 */
2648 spin_lock(&dev->dev_reservation_lock); 2648 spin_lock(&dev->dev_reservation_lock);
2649 pr_res_holder = dev->dev_pr_res_holder; 2649 pr_res_holder = dev->dev_pr_res_holder;
2650 if (!(pr_res_holder)) { 2650 if (!pr_res_holder) {
2651 /* 2651 /*
2652 * No persistent reservation, return GOOD status. 2652 * No persistent reservation, return GOOD status.
2653 */ 2653 */
@@ -2684,7 +2684,7 @@ static int core_scsi3_emulate_pro_release(
2684 * that is registered with the logical unit for the I_T nexus; 2684 * that is registered with the logical unit for the I_T nexus;
2685 */ 2685 */
2686 if (res_key != pr_reg->pr_res_key) { 2686 if (res_key != pr_reg->pr_res_key) {
2687 printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx" 2687 pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
2688 " does not match existing SA REGISTER res_key:" 2688 " does not match existing SA REGISTER res_key:"
2689 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2689 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2690 spin_unlock(&dev->dev_reservation_lock); 2690 spin_unlock(&dev->dev_reservation_lock);
@@ -2700,7 +2700,7 @@ static int core_scsi3_emulate_pro_release(
2700 if ((pr_res_holder->pr_res_type != type) || 2700 if ((pr_res_holder->pr_res_type != type) ||
2701 (pr_res_holder->pr_res_scope != scope)) { 2701 (pr_res_holder->pr_res_scope != scope)) {
2702 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2702 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2703 printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release" 2703 pr_err("SPC-3 PR RELEASE: Attempted to release"
2704 " reservation from [%s]: %s with different TYPE " 2704 " reservation from [%s]: %s with different TYPE "
2705 "and/or SCOPE while reservation already held by" 2705 "and/or SCOPE while reservation already held by"
2706 " [%s]: %s, returning RESERVATION_CONFLICT\n", 2706 " [%s]: %s, returning RESERVATION_CONFLICT\n",
@@ -2767,8 +2767,8 @@ write_aptpl:
2767 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 2767 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
2768 &pr_reg->pr_aptpl_buf[0], 2768 &pr_reg->pr_aptpl_buf[0],
2769 pr_tmpl->pr_aptpl_buf_len); 2769 pr_tmpl->pr_aptpl_buf_len);
2770 if (!(ret)) 2770 if (!ret)
2771 printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); 2771 pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
2772 } 2772 }
2773 2773
2774 core_scsi3_put_pr_reg(pr_reg); 2774 core_scsi3_put_pr_reg(pr_reg);
@@ -2791,8 +2791,8 @@ static int core_scsi3_emulate_pro_clear(
2791 */ 2791 */
2792 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, 2792 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
2793 se_sess->se_node_acl, se_sess); 2793 se_sess->se_node_acl, se_sess);
2794 if (!(pr_reg_n)) { 2794 if (!pr_reg_n) {
2795 printk(KERN_ERR "SPC-3 PR: Unable to locate" 2795 pr_err("SPC-3 PR: Unable to locate"
2796 " PR_REGISTERED *pr_reg for CLEAR\n"); 2796 " PR_REGISTERED *pr_reg for CLEAR\n");
2797 return PYX_TRANSPORT_LU_COMM_FAILURE; 2797 return PYX_TRANSPORT_LU_COMM_FAILURE;
2798 } 2798 }
@@ -2808,7 +2808,7 @@ static int core_scsi3_emulate_pro_clear(
2808 * that is registered with the logical unit for the I_T nexus. 2808 * that is registered with the logical unit for the I_T nexus.
2809 */ 2809 */
2810 if (res_key != pr_reg_n->pr_res_key) { 2810 if (res_key != pr_reg_n->pr_res_key) {
2811 printk(KERN_ERR "SPC-3 PR REGISTER: Received" 2811 pr_err("SPC-3 PR REGISTER: Received"
2812 " res_key: 0x%016Lx does not match" 2812 " res_key: 0x%016Lx does not match"
2813 " existing SA REGISTER res_key:" 2813 " existing SA REGISTER res_key:"
2814 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); 2814 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
@@ -2845,18 +2845,18 @@ static int core_scsi3_emulate_pro_clear(
2845 * command with CLEAR service action was received, with the 2845 * command with CLEAR service action was received, with the
2846 * additional sense code set to RESERVATIONS PREEMPTED. 2846 * additional sense code set to RESERVATIONS PREEMPTED.
2847 */ 2847 */
2848 if (!(calling_it_nexus)) 2848 if (!calling_it_nexus)
2849 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 2849 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
2850 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); 2850 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
2851 } 2851 }
2852 spin_unlock(&pr_tmpl->registration_lock); 2852 spin_unlock(&pr_tmpl->registration_lock);
2853 2853
2854 printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", 2854 pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
2855 cmd->se_tfo->get_fabric_name()); 2855 cmd->se_tfo->get_fabric_name());
2856 2856
2857 if (pr_tmpl->pr_aptpl_active) { 2857 if (pr_tmpl->pr_aptpl_active) {
2858 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); 2858 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
2859 printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" 2859 pr_debug("SPC-3 PR: Updated APTPL metadata"
2860 " for CLEAR\n"); 2860 " for CLEAR\n");
2861 } 2861 }
2862 2862
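The 0x2A additional sense code allocated above is ASC PARAMETERS CHANGED; the ASCQ picks the specific persistent reservation event that the other registered I_T nexuses see as a Unit Attention. A reference sketch; the constant name comes from the hunk, the numeric ASCQ values are quoted from SPC-3:

    /* SPC-3 ASC/ASCQ pairs raised by the PR service actions:
     *   0x2A/0x03  RESERVATIONS PREEMPTED   (PREEMPT, CLEAR)
     *   0x2A/0x04  RESERVATIONS RELEASED    (RELEASE)
     *   0x2A/0x05  REGISTRATIONS PREEMPTED  (PREEMPT clearing registrations)
     */
    core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
                           0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);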
@@ -2895,12 +2895,12 @@ static void __core_scsi3_complete_pro_preempt(
2895 pr_reg->pr_res_type = type; 2895 pr_reg->pr_res_type = type;
2896 pr_reg->pr_res_scope = scope; 2896 pr_reg->pr_res_scope = scope;
2897 2897
2898 printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new" 2898 pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
2899 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2899 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2900 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", 2900 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
2901 core_scsi3_pr_dump_type(type), 2901 core_scsi3_pr_dump_type(type),
2902 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2902 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2903 printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", 2903 pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
2904 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", 2904 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
2905 nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); 2905 nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
2906 /* 2906 /*
@@ -2926,7 +2926,7 @@ static void core_scsi3_release_preempt_and_abort(
2926 if (pr_reg_holder == pr_reg) 2926 if (pr_reg_holder == pr_reg)
2927 continue; 2927 continue;
2928 if (pr_reg->pr_res_holder) { 2928 if (pr_reg->pr_res_holder) {
2929 printk(KERN_WARNING "pr_reg->pr_res_holder still set\n"); 2929 pr_warn("pr_reg->pr_res_holder still set\n");
2930 continue; 2930 continue;
2931 } 2931 }
2932 2932
@@ -2971,14 +2971,14 @@ static int core_scsi3_pro_preempt(
2971 int all_reg = 0, calling_it_nexus = 0, released_regs = 0; 2971 int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
2972 int prh_type = 0, prh_scope = 0, ret; 2972 int prh_type = 0, prh_scope = 0, ret;
2973 2973
2974 if (!(se_sess)) 2974 if (!se_sess)
2975 return PYX_TRANSPORT_LU_COMM_FAILURE; 2975 return PYX_TRANSPORT_LU_COMM_FAILURE;
2976 2976
2977 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2977 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
2978 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 2978 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
2979 se_sess); 2979 se_sess);
2980 if (!(pr_reg_n)) { 2980 if (!pr_reg_n) {
2981 printk(KERN_ERR "SPC-3 PR: Unable to locate" 2981 pr_err("SPC-3 PR: Unable to locate"
2982 " PR_REGISTERED *pr_reg for PREEMPT%s\n", 2982 " PR_REGISTERED *pr_reg for PREEMPT%s\n",
2983 (abort) ? "_AND_ABORT" : ""); 2983 (abort) ? "_AND_ABORT" : "");
2984 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2984 return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -2988,7 +2988,7 @@ static int core_scsi3_pro_preempt(
2988 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2988 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2989 } 2989 }
2990 if (scope != PR_SCOPE_LU_SCOPE) { 2990 if (scope != PR_SCOPE_LU_SCOPE) {
2991 printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2991 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
2992 core_scsi3_put_pr_reg(pr_reg_n); 2992 core_scsi3_put_pr_reg(pr_reg_n);
2993 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2993 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
2994 } 2994 }
@@ -3001,7 +3001,7 @@ static int core_scsi3_pro_preempt(
3001 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) 3001 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
3002 all_reg = 1; 3002 all_reg = 1;
3003 3003
3004 if (!(all_reg) && !(sa_res_key)) { 3004 if (!all_reg && !sa_res_key) {
3005 spin_unlock(&dev->dev_reservation_lock); 3005 spin_unlock(&dev->dev_reservation_lock);
3006 core_scsi3_put_pr_reg(pr_reg_n); 3006 core_scsi3_put_pr_reg(pr_reg_n);
3007 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3007 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3015,7 +3015,7 @@ static int core_scsi3_pro_preempt(
3015 * server shall perform a preempt by doing the following in an 3015 * server shall perform a preempt by doing the following in an
3016 * uninterrupted series of actions. (See below..) 3016 * uninterrupted series of actions. (See below..)
3017 */ 3017 */
3018 if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) { 3018 if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) {
3019 /* 3019 /*
3020 * No existing or SA Reservation Key matching reservations.. 3020 * No existing or SA Reservation Key matching reservations..
3021 * 3021 *
@@ -3042,7 +3042,7 @@ static int core_scsi3_pro_preempt(
3042 * was received, with the additional sense code set 3042 * was received, with the additional sense code set
3043 * to REGISTRATIONS PREEMPTED. 3043 * to REGISTRATIONS PREEMPTED.
3044 */ 3044 */
3045 if (!(all_reg)) { 3045 if (!all_reg) {
3046 if (pr_reg->pr_res_key != sa_res_key) 3046 if (pr_reg->pr_res_key != sa_res_key)
3047 continue; 3047 continue;
3048 3048
@@ -3082,7 +3082,7 @@ static int core_scsi3_pro_preempt(
3082 NULL, 0); 3082 NULL, 0);
3083 released_regs++; 3083 released_regs++;
3084 } 3084 }
3085 if (!(calling_it_nexus)) 3085 if (!calling_it_nexus)
3086 core_scsi3_ua_allocate(pr_reg_nacl, 3086 core_scsi3_ua_allocate(pr_reg_nacl,
3087 pr_res_mapped_lun, 0x2A, 3087 pr_res_mapped_lun, 0x2A,
3088 ASCQ_2AH_RESERVATIONS_PREEMPTED); 3088 ASCQ_2AH_RESERVATIONS_PREEMPTED);
@@ -3095,7 +3095,7 @@ static int core_scsi3_pro_preempt(
3095 * registered reservation key, then the device server shall 3095 * registered reservation key, then the device server shall
3096 * complete the command with RESERVATION CONFLICT status. 3096 * complete the command with RESERVATION CONFLICT status.
3097 */ 3097 */
3098 if (!(released_regs)) { 3098 if (!released_regs) {
3099 spin_unlock(&dev->dev_reservation_lock); 3099 spin_unlock(&dev->dev_reservation_lock);
3100 core_scsi3_put_pr_reg(pr_reg_n); 3100 core_scsi3_put_pr_reg(pr_reg_n);
3101 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3101 return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3120,8 +3120,8 @@ static int core_scsi3_pro_preempt(
3120 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 3120 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
3121 &pr_reg_n->pr_aptpl_buf[0], 3121 &pr_reg_n->pr_aptpl_buf[0],
3122 pr_tmpl->pr_aptpl_buf_len); 3122 pr_tmpl->pr_aptpl_buf_len);
3123 if (!(ret)) 3123 if (!ret)
3124 printk(KERN_INFO "SPC-3 PR: Updated APTPL" 3124 pr_debug("SPC-3 PR: Updated APTPL"
3125 " metadata for PREEMPT%s\n", (abort) ? 3125 " metadata for PREEMPT%s\n", (abort) ?
3126 "_AND_ABORT" : ""); 3126 "_AND_ABORT" : "");
3127 } 3127 }
@@ -3256,8 +3256,8 @@ static int core_scsi3_pro_preempt(
3256 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 3256 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
3257 &pr_reg_n->pr_aptpl_buf[0], 3257 &pr_reg_n->pr_aptpl_buf[0],
3258 pr_tmpl->pr_aptpl_buf_len); 3258 pr_tmpl->pr_aptpl_buf_len);
3259 if (!(ret)) 3259 if (!ret)
3260 printk("SPC-3 PR: Updated APTPL metadata for PREEMPT" 3260 pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
3261 "%s\n", (abort) ? "_AND_ABORT" : ""); 3261 "%s\n", (abort) ? "_AND_ABORT" : "");
3262 } 3262 }
3263 3263
@@ -3287,7 +3287,7 @@ static int core_scsi3_emulate_pro_preempt(
3287 res_key, sa_res_key, abort); 3287 res_key, sa_res_key, abort);
3288 break; 3288 break;
3289 default: 3289 default:
3290 printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s" 3290 pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
3291 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); 3291 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
3292 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3292 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3293 } 3293 }
@@ -3321,8 +3321,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3321 unsigned short rtpi; 3321 unsigned short rtpi;
3322 unsigned char proto_ident; 3322 unsigned char proto_ident;
3323 3323
3324 if (!(se_sess) || !(se_lun)) { 3324 if (!se_sess || !se_lun) {
3325 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 3325 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
3326 return PYX_TRANSPORT_LU_COMM_FAILURE; 3326 return PYX_TRANSPORT_LU_COMM_FAILURE;
3327 } 3327 }
3328 memset(dest_iport, 0, 64); 3328 memset(dest_iport, 0, 64);
@@ -3338,8 +3338,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3338 */ 3338 */
3339 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 3339 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
3340 se_sess); 3340 se_sess);
3341 if (!(pr_reg)) { 3341 if (!pr_reg) {
3342 printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" 3342 pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
3343 " *pr_reg for REGISTER_AND_MOVE\n"); 3343 " *pr_reg for REGISTER_AND_MOVE\n");
3344 return PYX_TRANSPORT_LU_COMM_FAILURE; 3344 return PYX_TRANSPORT_LU_COMM_FAILURE;
3345 } 3345 }
@@ -3348,7 +3348,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3348 * provided during this initiator's I_T nexus registration. 3348 * provided during this initiator's I_T nexus registration.
3349 */ 3349 */
3350 if (res_key != pr_reg->pr_res_key) { 3350 if (res_key != pr_reg->pr_res_key) {
3351 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received" 3351 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
3352 " res_key: 0x%016Lx does not match existing SA REGISTER" 3352 " res_key: 0x%016Lx does not match existing SA REGISTER"
3353 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); 3353 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
3354 core_scsi3_put_pr_reg(pr_reg); 3354 core_scsi3_put_pr_reg(pr_reg);
@@ -3357,8 +3357,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3357 /* 3357 /*
3358 * The service active reservation key needs to be non zero 3358 * The service active reservation key needs to be non zero
3359 */ 3359 */
3360 if (!(sa_res_key)) { 3360 if (!sa_res_key) {
3361 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero" 3361 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
3362 " sa_res_key\n"); 3362 " sa_res_key\n");
3363 core_scsi3_put_pr_reg(pr_reg); 3363 core_scsi3_put_pr_reg(pr_reg);
3364 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3364 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3380,7 +3380,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3380 buf = NULL; 3380 buf = NULL;
3381 3381
3382 if ((tid_len + 24) != cmd->data_length) { 3382 if ((tid_len + 24) != cmd->data_length) {
3383 printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header" 3383 pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
3384 " does not equal CDB data_length: %u\n", tid_len, 3384 " does not equal CDB data_length: %u\n", tid_len,
3385 cmd->data_length); 3385 cmd->data_length);
3386 core_scsi3_put_pr_reg(pr_reg); 3386 core_scsi3_put_pr_reg(pr_reg);
@@ -3392,10 +3392,10 @@ static int core_scsi3_emulate_pro_register_and_move(
3392 if (se_port->sep_rtpi != rtpi) 3392 if (se_port->sep_rtpi != rtpi)
3393 continue; 3393 continue;
3394 dest_se_tpg = se_port->sep_tpg; 3394 dest_se_tpg = se_port->sep_tpg;
3395 if (!(dest_se_tpg)) 3395 if (!dest_se_tpg)
3396 continue; 3396 continue;
3397 dest_tf_ops = dest_se_tpg->se_tpg_tfo; 3397 dest_tf_ops = dest_se_tpg->se_tpg_tfo;
3398 if (!(dest_tf_ops)) 3398 if (!dest_tf_ops)
3399 continue; 3399 continue;
3400 3400
3401 atomic_inc(&dest_se_tpg->tpg_pr_ref_count); 3401 atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
@@ -3404,7 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3404 3404
3405 ret = core_scsi3_tpg_depend_item(dest_se_tpg); 3405 ret = core_scsi3_tpg_depend_item(dest_se_tpg);
3406 if (ret != 0) { 3406 if (ret != 0) {
3407 printk(KERN_ERR "core_scsi3_tpg_depend_item() failed" 3407 pr_err("core_scsi3_tpg_depend_item() failed"
3408 " for dest_se_tpg\n"); 3408 " for dest_se_tpg\n");
3409 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3409 atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
3410 smp_mb__after_atomic_dec(); 3410 smp_mb__after_atomic_dec();
@@ -3417,8 +3417,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3417 } 3417 }
3418 spin_unlock(&dev->se_port_lock); 3418 spin_unlock(&dev->se_port_lock);
3419 3419
3420 if (!(dest_se_tpg) || (!dest_tf_ops)) { 3420 if (!dest_se_tpg || !dest_tf_ops) {
3421 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3421 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3422 " fabric ops from Relative Target Port Identifier:" 3422 " fabric ops from Relative Target Port Identifier:"
3423 " %hu\n", rtpi); 3423 " %hu\n", rtpi);
3424 core_scsi3_put_pr_reg(pr_reg); 3424 core_scsi3_put_pr_reg(pr_reg);
@@ -3428,11 +3428,11 @@ static int core_scsi3_emulate_pro_register_and_move(
3428 buf = transport_kmap_first_data_page(cmd); 3428 buf = transport_kmap_first_data_page(cmd);
3429 proto_ident = (buf[24] & 0x0f); 3429 proto_ident = (buf[24] & 0x0f);
3430#if 0 3430#if 0
3431 printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" 3431 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
3432 " 0x%02x\n", proto_ident); 3432 " 0x%02x\n", proto_ident);
3433#endif 3433#endif
3434 if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { 3434 if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
3435 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received" 3435 pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
3436 " proto_ident: 0x%02x does not match ident: 0x%02x" 3436 " proto_ident: 0x%02x does not match ident: 0x%02x"
3437 " from fabric: %s\n", proto_ident, 3437 " from fabric: %s\n", proto_ident,
3438 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), 3438 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
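REGISTER_AND_MOVE parses the TransportID that begins at byte 24 of the parameter list; the low nibble is the SPC PROTOCOL IDENTIFIER, which must match the destination fabric as checked above. A sketch with the identifier values from SPC-3/SPC-4 for reference:

    buf = transport_kmap_first_data_page(cmd);
    proto_ident = buf[24] & 0x0f;
    /* PROTOCOL IDENTIFIER values: 0h FCP (Fibre Channel), 1h SPI,
     * 2h SSA, 3h IEEE 1394, 4h SRP (RDMA), 5h iSCSI, 6h SAS */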
@@ -3441,7 +3441,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3441 goto out; 3441 goto out;
3442 } 3442 }
3443 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { 3443 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
3444 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not" 3444 pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
3445 " containg a valid tpg_parse_pr_out_transport_id" 3445 " containg a valid tpg_parse_pr_out_transport_id"
3446 " function pointer\n"); 3446 " function pointer\n");
3447 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3447 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -3449,8 +3449,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3449 } 3449 }
3450 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, 3450 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
3451 (const char *)&buf[24], &tmp_tid_len, &iport_ptr); 3451 (const char *)&buf[24], &tmp_tid_len, &iport_ptr);
3452 if (!(initiator_str)) { 3452 if (!initiator_str) {
3453 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3453 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3454 " initiator_str from Transport ID\n"); 3454 " initiator_str from Transport ID\n");
3455 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3455 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3456 goto out; 3456 goto out;
@@ -3459,7 +3459,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3459 transport_kunmap_first_data_page(cmd); 3459 transport_kunmap_first_data_page(cmd);
3460 buf = NULL; 3460 buf = NULL;
3461 3461
3462 printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s" 3462 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
3463 " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? 3463 " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
3464 "port" : "device", initiator_str, (iport_ptr != NULL) ? 3464 "port" : "device", initiator_str, (iport_ptr != NULL) ?
3465 iport_ptr : ""); 3465 iport_ptr : "");
@@ -3474,18 +3474,18 @@ static int core_scsi3_emulate_pro_register_and_move(
3474 pr_reg_nacl = pr_reg->pr_reg_nacl; 3474 pr_reg_nacl = pr_reg->pr_reg_nacl;
3475 matching_iname = (!strcmp(initiator_str, 3475 matching_iname = (!strcmp(initiator_str,
3476 pr_reg_nacl->initiatorname)) ? 1 : 0; 3476 pr_reg_nacl->initiatorname)) ? 1 : 0;
3477 if (!(matching_iname)) 3477 if (!matching_iname)
3478 goto after_iport_check; 3478 goto after_iport_check;
3479 3479
3480 if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) { 3480 if (!iport_ptr || !pr_reg->isid_present_at_reg) {
3481 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" 3481 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
3482 " matches: %s on received I_T Nexus\n", initiator_str, 3482 " matches: %s on received I_T Nexus\n", initiator_str,
3483 pr_reg_nacl->initiatorname); 3483 pr_reg_nacl->initiatorname);
3484 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3484 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3485 goto out; 3485 goto out;
3486 } 3486 }
3487 if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) { 3487 if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
3488 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" 3488 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
3489 " matches: %s %s on received I_T Nexus\n", 3489 " matches: %s %s on received I_T Nexus\n",
3490 initiator_str, iport_ptr, pr_reg_nacl->initiatorname, 3490 initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
3491 pr_reg->pr_reg_isid); 3491 pr_reg->pr_reg_isid);
@@ -3505,8 +3505,8 @@ after_iport_check:
3505 } 3505 }
3506 spin_unlock_bh(&dest_se_tpg->acl_node_lock); 3506 spin_unlock_bh(&dest_se_tpg->acl_node_lock);
3507 3507
3508 if (!(dest_node_acl)) { 3508 if (!dest_node_acl) {
3509 printk(KERN_ERR "Unable to locate %s dest_node_acl for" 3509 pr_err("Unable to locate %s dest_node_acl for"
3510 " TransportID%s\n", dest_tf_ops->get_fabric_name(), 3510 " TransportID%s\n", dest_tf_ops->get_fabric_name(),
3511 initiator_str); 3511 initiator_str);
3512 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3512 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3514,7 +3514,7 @@ after_iport_check:
3514 } 3514 }
3515 ret = core_scsi3_nodeacl_depend_item(dest_node_acl); 3515 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
3516 if (ret != 0) { 3516 if (ret != 0) {
3517 printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for" 3517 pr_err("core_scsi3_nodeacl_depend_item() for"
3518 " dest_node_acl\n"); 3518 " dest_node_acl\n");
3519 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3519 atomic_dec(&dest_node_acl->acl_pr_ref_count);
3520 smp_mb__after_atomic_dec(); 3520 smp_mb__after_atomic_dec();
@@ -3523,7 +3523,7 @@ after_iport_check:
3523 goto out; 3523 goto out;
3524 } 3524 }
3525#if 0 3525#if 0
3526 printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" 3526 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
3527 " %s from TransportID\n", dest_tf_ops->get_fabric_name(), 3527 " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
3528 dest_node_acl->initiatorname); 3528 dest_node_acl->initiatorname);
3529#endif 3529#endif
@@ -3532,8 +3532,8 @@ after_iport_check:
3532 * PORT IDENTIFIER. 3532 * PORT IDENTIFIER.
3533 */ 3533 */
3534 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); 3534 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
3535 if (!(dest_se_deve)) { 3535 if (!dest_se_deve) {
3536 printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:" 3536 pr_err("Unable to locate %s dest_se_deve from RTPI:"
3537 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); 3537 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
3538 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3538 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3539 goto out; 3539 goto out;
@@ -3541,7 +3541,7 @@ after_iport_check:
3541 3541
3542 ret = core_scsi3_lunacl_depend_item(dest_se_deve); 3542 ret = core_scsi3_lunacl_depend_item(dest_se_deve);
3543 if (ret < 0) { 3543 if (ret < 0) {
3544 printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n"); 3544 pr_err("core_scsi3_lunacl_depend_item() failed\n");
3545 atomic_dec(&dest_se_deve->pr_ref_count); 3545 atomic_dec(&dest_se_deve->pr_ref_count);
3546 smp_mb__after_atomic_dec(); 3546 smp_mb__after_atomic_dec();
3547 dest_se_deve = NULL; 3547 dest_se_deve = NULL;
@@ -3549,7 +3549,7 @@ after_iport_check:
3549 goto out; 3549 goto out;
3550 } 3550 }
3551#if 0 3551#if 0
3552 printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" 3552 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
3553 " ACL for dest_se_deve->mapped_lun: %u\n", 3553 " ACL for dest_se_deve->mapped_lun: %u\n",
3554 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, 3554 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
3555 dest_se_deve->mapped_lun); 3555 dest_se_deve->mapped_lun);
@@ -3560,8 +3560,8 @@ after_iport_check:
3560 */ 3560 */
3561 spin_lock(&dev->dev_reservation_lock); 3561 spin_lock(&dev->dev_reservation_lock);
3562 pr_res_holder = dev->dev_pr_res_holder; 3562 pr_res_holder = dev->dev_pr_res_holder;
3563 if (!(pr_res_holder)) { 3563 if (!pr_res_holder) {
3564 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation" 3564 pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
3565 " currently held\n"); 3565 " currently held\n");
3566 spin_unlock(&dev->dev_reservation_lock); 3566 spin_unlock(&dev->dev_reservation_lock);
3567 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 3567 ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
@@ -3574,7 +3574,7 @@ after_iport_check:
3574 * Register behaviors for a REGISTER AND MOVE service action 3574 * Register behaviors for a REGISTER AND MOVE service action
3575 */ 3575 */
3576 if (pr_res_holder != pr_reg) { 3576 if (pr_res_holder != pr_reg) {
3577 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T" 3577 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
3578 " Nexus is not reservation holder\n"); 3578 " Nexus is not reservation holder\n");
3579 spin_unlock(&dev->dev_reservation_lock); 3579 spin_unlock(&dev->dev_reservation_lock);
3580 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3580 ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3591,7 +3591,7 @@ after_iport_check:
3591 */ 3591 */
3592 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 3592 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
3593 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { 3593 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
3594 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move" 3594 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move"
3595 " reservation for type: %s\n", 3595 " reservation for type: %s\n",
3596 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); 3596 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
3597 spin_unlock(&dev->dev_reservation_lock); 3597 spin_unlock(&dev->dev_reservation_lock);
@@ -3626,7 +3626,7 @@ after_iport_check:
3626 */ 3626 */
3627 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, 3627 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
3628 iport_ptr); 3628 iport_ptr);
3629 if (!(dest_pr_reg)) { 3629 if (!dest_pr_reg) {
3630 ret = core_scsi3_alloc_registration(cmd->se_dev, 3630 ret = core_scsi3_alloc_registration(cmd->se_dev,
3631 dest_node_acl, dest_se_deve, iport_ptr, 3631 dest_node_acl, dest_se_deve, iport_ptr,
3632 sa_res_key, 0, aptpl, 2, 1); 3632 sa_res_key, 0, aptpl, 2, 1);
@@ -3659,16 +3659,16 @@ after_iport_check:
3659 /* 3659 /*
3660 * Increment PRGeneration for existing registrations.. 3660 * Increment PRGeneration for existing registrations..
3661 */ 3661 */
3662 if (!(new_reg)) 3662 if (!new_reg)
3663 dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++; 3663 dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
3664 spin_unlock(&dev->dev_reservation_lock); 3664 spin_unlock(&dev->dev_reservation_lock);
3665 3665
3666 printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" 3666 pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
3667 " created new reservation holder TYPE: %s on object RTPI:" 3667 " created new reservation holder TYPE: %s on object RTPI:"
3668 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), 3668 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
3669 core_scsi3_pr_dump_type(type), rtpi, 3669 core_scsi3_pr_dump_type(type), rtpi,
3670 dest_pr_reg->pr_res_generation); 3670 dest_pr_reg->pr_res_generation);
3671 printk(KERN_INFO "SPC-3 PR Successfully moved reservation from" 3671 pr_debug("SPC-3 PR Successfully moved reservation from"
3672 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", 3672 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
3673 tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, 3673 tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
3674 (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(), 3674 (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
@@ -3696,18 +3696,18 @@ after_iport_check:
3696 * Clear the APTPL metadata if APTPL has been disabled, otherwise 3696 * Clear the APTPL metadata if APTPL has been disabled, otherwise
3697 * write out the updated metadata to struct file for this SCSI device. 3697 * write out the updated metadata to struct file for this SCSI device.
3698 */ 3698 */
3699 if (!(aptpl)) { 3699 if (!aptpl) {
3700 pr_tmpl->pr_aptpl_active = 0; 3700 pr_tmpl->pr_aptpl_active = 0;
3701 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); 3701 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
3702 printk("SPC-3 PR: Set APTPL Bit Deactivated for" 3702 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
3703 " REGISTER_AND_MOVE\n"); 3703 " REGISTER_AND_MOVE\n");
3704 } else { 3704 } else {
3705 pr_tmpl->pr_aptpl_active = 1; 3705 pr_tmpl->pr_aptpl_active = 1;
3706 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 3706 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
3707 &dest_pr_reg->pr_aptpl_buf[0], 3707 &dest_pr_reg->pr_aptpl_buf[0],
3708 pr_tmpl->pr_aptpl_buf_len); 3708 pr_tmpl->pr_aptpl_buf_len);
3709 if (!(ret)) 3709 if (!ret)
3710 printk("SPC-3 PR: Set APTPL Bit Activated for" 3710 pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
3711 " REGISTER_AND_MOVE\n"); 3711 " REGISTER_AND_MOVE\n");
3712 } 3712 }
3713 3713
@@ -3750,11 +3750,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3750 * FIXME: A NULL struct se_session pointer means an this is not coming from 3750 * FIXME: A NULL struct se_session pointer means an this is not coming from
3751 * a $FABRIC_MOD's nexus, but from internal passthrough ops. 3751 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
3752 */ 3752 */
3753 if (!(cmd->se_sess)) 3753 if (!cmd->se_sess)
3754 return PYX_TRANSPORT_LU_COMM_FAILURE; 3754 return PYX_TRANSPORT_LU_COMM_FAILURE;
3755 3755
3756 if (cmd->data_length < 24) { 3756 if (cmd->data_length < 24) {
3757 printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list" 3757 pr_warn("SPC-PR: Received PR OUT parameter list"
3758 " length too small: %u\n", cmd->data_length); 3758 " length too small: %u\n", cmd->data_length);
3759 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3759 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3760 } 3760 }
@@ -3800,9 +3800,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3800 * the sense key set to ILLEGAL REQUEST, and the additional sense 3800 * the sense key set to ILLEGAL REQUEST, and the additional sense
3801 * code set to PARAMETER LIST LENGTH ERROR. 3801 * code set to PARAMETER LIST LENGTH ERROR.
3802 */ 3802 */
3803 if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && 3803 if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
3804 (cmd->data_length != 24)) { 3804 (cmd->data_length != 24)) {
3805 printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter" 3805 pr_warn("SPC-PR: Received PR OUT illegal parameter"
3806 " list length: %u\n", cmd->data_length); 3806 " list length: %u\n", cmd->data_length);
3807 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3807 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3808 } 3808 }
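Both data_length checks above derive from the fixed 24-byte PERSISTENT RESERVE OUT parameter list; only SPEC_I_PT and REGISTER_AND_MOVE may legally carry more. Its basic layout per SPC-3, as a reference sketch:

    /* PROUT parameter list (24 bytes):
     *   bytes  0- 7  RESERVATION KEY
     *   bytes  8-15  SERVICE ACTION RESERVATION KEY
     *   bytes 16-19  obsolete (scope-specific address)
     *   byte  20     flag bits: SPEC_I_PT(3), ALL_TG_PT(2), APTPL(0)
     *   byte  21     reserved
     *   bytes 22-23  obsolete (extent length)
     */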
@@ -3836,7 +3836,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3836 return core_scsi3_emulate_pro_register_and_move(cmd, res_key, 3836 return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
3837 sa_res_key, aptpl, unreg); 3837 sa_res_key, aptpl, unreg);
3838 default: 3838 default:
3839 printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" 3839 pr_err("Unknown PERSISTENT_RESERVE_OUT service"
3840 " action: 0x%02x\n", cdb[1] & 0x1f); 3840 " action: 0x%02x\n", cdb[1] & 0x1f);
3841 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3841 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3842 } 3842 }
@@ -3858,7 +3858,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3858 u32 add_len = 0, off = 8; 3858 u32 add_len = 0, off = 8;
3859 3859
3860 if (cmd->data_length < 8) { 3860 if (cmd->data_length < 8) {
3861 printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u" 3861 pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
3862 " too small\n", cmd->data_length); 3862 " too small\n", cmd->data_length);
3863 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3863 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3864 } 3864 }
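The 8-byte floor rejected above is the fixed READ KEYS response header; registered keys follow at offset 8, eight bytes each, which is why the hunk initializes off = 8. Layout per SPC-3:

    /* PRIN READ KEYS response:
     *   bytes 0-3  PRGENERATION
     *   bytes 4-7  ADDITIONAL LENGTH (count of key bytes that follow)
     *   bytes 8+   one 8-byte reservation key per registration
     */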
@@ -3917,7 +3917,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3917 u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ 3917 u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
3918 3918
3919 if (cmd->data_length < 8) { 3919 if (cmd->data_length < 8) {
3920 printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u" 3920 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
3921 " too small\n", cmd->data_length); 3921 " too small\n", cmd->data_length);
3922 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3922 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3923 } 3923 }
@@ -3999,7 +3999,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
3999 u16 add_len = 8; /* Hardcoded to 8. */ 3999 u16 add_len = 8; /* Hardcoded to 8. */
4000 4000
4001 if (cmd->data_length < 6) { 4001 if (cmd->data_length < 6) {
4002 printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:" 4002 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
4003 " %u too small\n", cmd->data_length); 4003 " %u too small\n", cmd->data_length);
4004 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4004 return PYX_TRANSPORT_INVALID_CDB_FIELD;
4005 } 4005 }
@@ -4060,7 +4060,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4060 int format_code = 0; 4060 int format_code = 0;
4061 4061
4062 if (cmd->data_length < 8) { 4062 if (cmd->data_length < 8) {
4063 printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u" 4063 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
4064 " too small\n", cmd->data_length); 4064 " too small\n", cmd->data_length);
4065 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4065 return PYX_TRANSPORT_INVALID_CDB_FIELD;
4066 } 4066 }
@@ -4091,7 +4091,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4091 se_tpg, se_nacl, pr_reg, &format_code); 4091 se_tpg, se_nacl, pr_reg, &format_code);
4092 4092
4093 if ((exp_desc_len + add_len) > cmd->data_length) { 4093 if ((exp_desc_len + add_len) > cmd->data_length) {
4094 printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran" 4094 pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
4095 " out of buffer: %d\n", cmd->data_length); 4095 " out of buffer: %d\n", cmd->data_length);
4096 spin_lock(&pr_tmpl->registration_lock); 4096 spin_lock(&pr_tmpl->registration_lock);
4097 atomic_dec(&pr_reg->pr_res_holders); 4097 atomic_dec(&pr_reg->pr_res_holders);
@@ -4141,7 +4141,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4141 * bit is set to one, the contents of the RELATIVE TARGET PORT 4141 * bit is set to one, the contents of the RELATIVE TARGET PORT
4142 * IDENTIFIER field are not defined by this standard. 4142 * IDENTIFIER field are not defined by this standard.
4143 */ 4143 */
4144 if (!(pr_reg->pr_reg_all_tg_pt)) { 4144 if (!pr_reg->pr_reg_all_tg_pt) {
4145 struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; 4145 struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
4146 4146
4147 buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 4147 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
@@ -4203,7 +4203,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
4203 case PRI_READ_FULL_STATUS: 4203 case PRI_READ_FULL_STATUS:
4204 return core_scsi3_pri_read_full_status(cmd); 4204 return core_scsi3_pri_read_full_status(cmd);
4205 default: 4205 default:
4206 printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service" 4206 pr_err("Unknown PERSISTENT_RESERVE_IN service"
4207 " action: 0x%02x\n", cdb[1] & 0x1f); 4207 " action: 0x%02x\n", cdb[1] & 0x1f);
4208 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4208 return PYX_TRANSPORT_INVALID_CDB_FIELD;
4209 } 4209 }
@@ -4224,7 +4224,7 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd)
4224 * CONFLICT status. 4224 * CONFLICT status.
4225 */ 4225 */
4226 if (dev->dev_flags & DF_SPC2_RESERVATIONS) { 4226 if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
4227 printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy" 4227 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
4228 " SPC-2 reservation is held, returning" 4228 " SPC-2 reservation is held, returning"
4229 " RESERVATION_CONFLICT\n"); 4229 " RESERVATION_CONFLICT\n");
4230 return PYX_TRANSPORT_RESERVATION_CONFLICT; 4230 return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -4263,7 +4263,7 @@ int core_setup_reservations(struct se_device *dev, int force_pt)
4263 rest->res_type = SPC_PASSTHROUGH; 4263 rest->res_type = SPC_PASSTHROUGH;
4264 rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; 4264 rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
4265 rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; 4265 rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
4266 printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" 4266 pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
4267 " emulation\n", dev->transport->name); 4267 " emulation\n", dev->transport->name);
4268 return 0; 4268 return 0;
4269 } 4269 }
@@ -4275,14 +4275,14 @@ int core_setup_reservations(struct se_device *dev, int force_pt)
4275 rest->res_type = SPC3_PERSISTENT_RESERVATIONS; 4275 rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
4276 rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; 4276 rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
4277 rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; 4277 rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
4278 printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" 4278 pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
4279 " emulation\n", dev->transport->name); 4279 " emulation\n", dev->transport->name);
4280 } else { 4280 } else {
4281 rest->res_type = SPC2_RESERVATIONS; 4281 rest->res_type = SPC2_RESERVATIONS;
4282 rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; 4282 rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
4283 rest->pr_ops.t10_seq_non_holder = 4283 rest->pr_ops.t10_seq_non_holder =
4284 &core_scsi2_reservation_seq_non_holder; 4284 &core_scsi2_reservation_seq_non_holder;
4285 printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n", 4285 pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
4286 dev->transport->name); 4286 dev->transport->name);
4287 } 4287 }
4288 4288
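/*
 * A minimal kernel-context sketch (not part of this patch) of the
 * printk -> pr_* conversion applied throughout: pr_err(), pr_warn()
 * and pr_debug() come from <linux/printk.h>, and with
 * CONFIG_DYNAMIC_DEBUG enabled each pr_debug() site becomes a hook
 * that can be toggled at runtime, e.g.
 *
 *   echo 'file target_core_pr.c +p' > \
 *       /sys/kernel/debug/dynamic_debug/control
 *
 * which is what makes the custom per-module debug macros removed in
 * this series redundant.
 */
#include <linux/errno.h>
#include <linux/printk.h>

static int example_check_prin_length(unsigned int data_length)
{
	if (data_length < 8) {
		pr_err("PRIN payload length %u too small\n", data_length);
		return -EINVAL;
	}
	/* Compiled out, or runtime-gated under CONFIG_DYNAMIC_DEBUG */
	pr_debug("PRIN payload length %u ok\n", data_length);
	return 0;
}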
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 318ef14fe37d..a2ce5998d318 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -65,8 +65,8 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
65 struct pscsi_hba_virt *phv; 65 struct pscsi_hba_virt *phv;
66 66
67 phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); 67 phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
68 if (!(phv)) { 68 if (!phv) {
69 printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); 69 pr_err("Unable to allocate struct pscsi_hba_virt\n");
70 return -ENOMEM; 70 return -ENOMEM;
71 } 71 }
72 phv->phv_host_id = host_id; 72 phv->phv_host_id = host_id;
@@ -74,10 +74,10 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
74 74
75 hba->hba_ptr = phv; 75 hba->hba_ptr = phv;
76 76
77 printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" 77 pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
78 " Generic Target Core Stack %s\n", hba->hba_id, 78 " Generic Target Core Stack %s\n", hba->hba_id,
79 PSCSI_VERSION, TARGET_CORE_MOD_VERSION); 79 PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
80 printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic\n", 80 pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
81 hba->hba_id); 81 hba->hba_id);
82 82
83 return 0; 83 return 0;
@@ -91,12 +91,12 @@ static void pscsi_detach_hba(struct se_hba *hba)
91 if (scsi_host) { 91 if (scsi_host) {
92 scsi_host_put(scsi_host); 92 scsi_host_put(scsi_host);
93 93
94 printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from" 94 pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
95 " Generic Target Core\n", hba->hba_id, 95 " Generic Target Core\n", hba->hba_id,
96 (scsi_host->hostt->name) ? (scsi_host->hostt->name) : 96 (scsi_host->hostt->name) ? (scsi_host->hostt->name) :
97 "Unknown"); 97 "Unknown");
98 } else 98 } else
99 printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA" 99 pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
100 " from Generic Target Core\n", hba->hba_id); 100 " from Generic Target Core\n", hba->hba_id);
101 101
102 kfree(phv); 102 kfree(phv);
@@ -110,14 +110,14 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
110 /* 110 /*
111 * Release the struct Scsi_Host 111 * Release the struct Scsi_Host
112 */ 112 */
113 if (!(mode_flag)) { 113 if (!mode_flag) {
114 if (!(sh)) 114 if (!sh)
115 return 0; 115 return 0;
116 116
117 phv->phv_lld_host = NULL; 117 phv->phv_lld_host = NULL;
118 phv->phv_mode = PHV_VIRUTAL_HOST_ID; 118 phv->phv_mode = PHV_VIRUTAL_HOST_ID;
119 119
120 printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" 120 pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
121 " %s\n", hba->hba_id, (sh->hostt->name) ? 121 " %s\n", hba->hba_id, (sh->hostt->name) ?
122 (sh->hostt->name) : "Unknown"); 122 (sh->hostt->name) : "Unknown");
123 123
@@ -130,7 +130,7 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
130 */ 130 */
131 sh = scsi_host_lookup(phv->phv_host_id); 131 sh = scsi_host_lookup(phv->phv_host_id);
132 if (IS_ERR(sh)) { 132 if (IS_ERR(sh)) {
133 printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" 133 pr_err("pSCSI: Unable to locate SCSI Host for"
134 " phv_host_id: %d\n", phv->phv_host_id); 134 " phv_host_id: %d\n", phv->phv_host_id);
135 return PTR_ERR(sh); 135 return PTR_ERR(sh);
136 } 136 }
@@ -138,7 +138,7 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
138 phv->phv_lld_host = sh; 138 phv->phv_lld_host = sh;
139 phv->phv_mode = PHV_LLD_SCSI_HOST_NO; 139 phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
140 140
141 printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", 141 pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
142 hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); 142 hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
143 143
144 return 1; 144 return 1;
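/*
 * A minimal userspace model (not from this patch) of the ERR_PTR
 * convention used by scsi_host_lookup() above: the kernel encodes a
 * negative errno in the returned pointer itself, relying on the top
 * 4095 addresses never being valid, so callers test IS_ERR() and
 * unwrap with PTR_ERR() instead of threading a separate status
 * out-parameter. The demo_* names below are stand-ins for the real
 * <linux/err.h> helpers:
 */
#include <errno.h>
#include <stdio.h>

static void *demo_err_ptr(long err)
{
	return (void *)err;		/* e.g. -ENODEV -> 0xff...ed */
}

static int demo_is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095L;
}

static long demo_ptr_err(const void *p)
{
	return (long)p;
}

int main(void)
{
	void *h = demo_err_ptr(-ENODEV);	/* a failed "lookup" */

	if (demo_is_err(h))
		printf("lookup failed: %ld\n", demo_ptr_err(h));
	return 0;
}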
@@ -257,15 +257,15 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
257 page_83 = &buf[off]; 257 page_83 = &buf[off];
258 ident_len = page_83[3]; 258 ident_len = page_83[3];
259 if (!ident_len) { 259 if (!ident_len) {
260 printk(KERN_ERR "page_83[3]: identifier" 260 pr_err("page_83[3]: identifier"
261 " length zero!\n"); 261 " length zero!\n");
262 break; 262 break;
263 } 263 }
264 printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len); 264 pr_debug("T10 VPD Identifer Length: %d\n", ident_len);
265 265
266 vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); 266 vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
267 if (!vpd) { 267 if (!vpd) {
268 printk(KERN_ERR "Unable to allocate memory for" 268 pr_err("Unable to allocate memory for"
269 " struct t10_vpd\n"); 269 " struct t10_vpd\n");
270 goto out; 270 goto out;
271 } 271 }
@@ -317,7 +317,7 @@ static struct se_device *pscsi_add_device_to_list(
317 if (!sd->queue_depth) { 317 if (!sd->queue_depth) {
318 sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; 318 sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
319 319
320 printk(KERN_ERR "Set broken SCSI Device %d:%d:%d" 320 pr_err("Set broken SCSI Device %d:%d:%d"
321 " queue_depth to %d\n", sd->channel, sd->id, 321 " queue_depth to %d\n", sd->channel, sd->id,
322 sd->lun, sd->queue_depth); 322 sd->lun, sd->queue_depth);
323 } 323 }
@@ -355,7 +355,7 @@ static struct se_device *pscsi_add_device_to_list(
355 dev = transport_add_device_to_core_hba(hba, &pscsi_template, 355 dev = transport_add_device_to_core_hba(hba, &pscsi_template,
356 se_dev, dev_flags, pdv, 356 se_dev, dev_flags, pdv,
357 &dev_limits, NULL, NULL); 357 &dev_limits, NULL, NULL);
358 if (!(dev)) { 358 if (!dev) {
359 pdv->pdv_sd = NULL; 359 pdv->pdv_sd = NULL;
360 return NULL; 360 return NULL;
361 } 361 }
@@ -385,13 +385,13 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
385 struct pscsi_dev_virt *pdv; 385 struct pscsi_dev_virt *pdv;
386 386
387 pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); 387 pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
388 if (!(pdv)) { 388 if (!pdv) {
389 printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n"); 389 pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
390 return NULL; 390 return NULL;
391 } 391 }
392 pdv->pdv_se_hba = hba; 392 pdv->pdv_se_hba = hba;
393 393
394 printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); 394 pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
395 return pdv; 395 return pdv;
396} 396}
397 397
@@ -412,7 +412,7 @@ static struct se_device *pscsi_create_type_disk(
412 u32 dev_flags = 0; 412 u32 dev_flags = 0;
413 413
414 if (scsi_device_get(sd)) { 414 if (scsi_device_get(sd)) {
415 printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", 415 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
416 sh->host_no, sd->channel, sd->id, sd->lun); 416 sh->host_no, sd->channel, sd->id, sd->lun);
417 spin_unlock_irq(sh->host_lock); 417 spin_unlock_irq(sh->host_lock);
418 return NULL; 418 return NULL;
@@ -425,19 +425,19 @@ static struct se_device *pscsi_create_type_disk(
425 bd = blkdev_get_by_path(se_dev->se_dev_udev_path, 425 bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
426 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); 426 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
427 if (IS_ERR(bd)) { 427 if (IS_ERR(bd)) {
428 printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); 428 pr_err("pSCSI: blkdev_get_by_path() failed\n");
429 scsi_device_put(sd); 429 scsi_device_put(sd);
430 return NULL; 430 return NULL;
431 } 431 }
432 pdv->pdv_bd = bd; 432 pdv->pdv_bd = bd;
433 433
434 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 434 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
435 if (!(dev)) { 435 if (!dev) {
436 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 436 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
437 scsi_device_put(sd); 437 scsi_device_put(sd);
438 return NULL; 438 return NULL;
439 } 439 }
440 printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", 440 pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
441 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); 441 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
442 442
443 return dev; 443 return dev;
@@ -459,7 +459,7 @@ static struct se_device *pscsi_create_type_rom(
459 u32 dev_flags = 0; 459 u32 dev_flags = 0;
460 460
461 if (scsi_device_get(sd)) { 461 if (scsi_device_get(sd)) {
462 printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", 462 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
463 sh->host_no, sd->channel, sd->id, sd->lun); 463 sh->host_no, sd->channel, sd->id, sd->lun);
464 spin_unlock_irq(sh->host_lock); 464 spin_unlock_irq(sh->host_lock);
465 return NULL; 465 return NULL;
@@ -467,11 +467,11 @@ static struct se_device *pscsi_create_type_rom(
467 spin_unlock_irq(sh->host_lock); 467 spin_unlock_irq(sh->host_lock);
468 468
469 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 469 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
470 if (!(dev)) { 470 if (!dev) {
471 scsi_device_put(sd); 471 scsi_device_put(sd);
472 return NULL; 472 return NULL;
473 } 473 }
474 printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 474 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
475 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 475 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
476 sd->channel, sd->id, sd->lun); 476 sd->channel, sd->id, sd->lun);
477 477
@@ -495,10 +495,10 @@ static struct se_device *pscsi_create_type_other(
495 495
496 spin_unlock_irq(sh->host_lock); 496 spin_unlock_irq(sh->host_lock);
497 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 497 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
498 if (!(dev)) 498 if (!dev)
499 return NULL; 499 return NULL;
500 500
501 printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 501 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
502 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 502 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
503 sd->channel, sd->id, sd->lun); 503 sd->channel, sd->id, sd->lun);
504 504
@@ -517,8 +517,8 @@ static struct se_device *pscsi_create_virtdevice(
517 struct Scsi_Host *sh = phv->phv_lld_host; 517 struct Scsi_Host *sh = phv->phv_lld_host;
518 int legacy_mode_enable = 0; 518 int legacy_mode_enable = 0;
519 519
520 if (!(pdv)) { 520 if (!pdv) {
521 printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" 521 pr_err("Unable to locate struct pscsi_dev_virt"
522 " parameter\n"); 522 " parameter\n");
523 return ERR_PTR(-EINVAL); 523 return ERR_PTR(-EINVAL);
524 } 524 }
@@ -526,9 +526,9 @@ static struct se_device *pscsi_create_virtdevice(
526 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the 526 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
527 * struct Scsi_Host we will need to bring the TCM/pSCSI object online 527 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
528 */ 528 */
529 if (!(sh)) { 529 if (!sh) {
530 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { 530 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
531 printk(KERN_ERR "pSCSI: Unable to locate struct" 531 pr_err("pSCSI: Unable to locate struct"
532 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); 532 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
533 return ERR_PTR(-ENODEV); 533 return ERR_PTR(-ENODEV);
534 } 534 }
@@ -537,7 +537,7 @@ static struct se_device *pscsi_create_virtdevice(
537 * reference, we enforce that udev_path has been set 537 * reference, we enforce that udev_path has been set
538 */ 538 */
539 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { 539 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
540 printk(KERN_ERR "pSCSI: udev_path attribute has not" 540 pr_err("pSCSI: udev_path attribute has not"
541 " been set before ENABLE=1\n"); 541 " been set before ENABLE=1\n");
542 return ERR_PTR(-EINVAL); 542 return ERR_PTR(-EINVAL);
543 } 543 }
@@ -548,8 +548,8 @@ static struct se_device *pscsi_create_virtdevice(
548 */ 548 */
549 if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { 549 if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
550 spin_lock(&hba->device_lock); 550 spin_lock(&hba->device_lock);
551 if (!(list_empty(&hba->hba_dev_list))) { 551 if (!list_empty(&hba->hba_dev_list)) {
552 printk(KERN_ERR "pSCSI: Unable to set hba_mode" 552 pr_err("pSCSI: Unable to set hba_mode"
553 " with active devices\n"); 553 " with active devices\n");
554 spin_unlock(&hba->device_lock); 554 spin_unlock(&hba->device_lock);
555 return ERR_PTR(-EEXIST); 555 return ERR_PTR(-EEXIST);
@@ -565,14 +565,14 @@ static struct se_device *pscsi_create_virtdevice(
565 } else { 565 } else {
566 sh = scsi_host_lookup(pdv->pdv_host_id); 566 sh = scsi_host_lookup(pdv->pdv_host_id);
567 if (IS_ERR(sh)) { 567 if (IS_ERR(sh)) {
568 printk(KERN_ERR "pSCSI: Unable to locate" 568 pr_err("pSCSI: Unable to locate"
569 " pdv_host_id: %d\n", pdv->pdv_host_id); 569 " pdv_host_id: %d\n", pdv->pdv_host_id);
570 return (struct se_device *) sh; 570 return (struct se_device *) sh;
571 } 571 }
572 } 572 }
573 } else { 573 } else {
574 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { 574 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
575 printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" 575 pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while"
576 " struct Scsi_Host exists\n"); 576 " struct Scsi_Host exists\n");
577 return ERR_PTR(-EEXIST); 577 return ERR_PTR(-EEXIST);
578 } 578 }
@@ -601,7 +601,7 @@ static struct se_device *pscsi_create_virtdevice(
601 break; 601 break;
602 } 602 }
603 603
604 if (!(dev)) { 604 if (!dev) {
605 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) 605 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
606 scsi_host_put(sh); 606 scsi_host_put(sh);
607 else if (legacy_mode_enable) { 607 else if (legacy_mode_enable) {
@@ -615,7 +615,7 @@ static struct se_device *pscsi_create_virtdevice(
615 } 615 }
616 spin_unlock_irq(sh->host_lock); 616 spin_unlock_irq(sh->host_lock);
617 617
618 printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, 618 pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
619 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); 619 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
620 620
621 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) 621 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
@@ -729,8 +729,8 @@ after_mode_sense:
729 u32 blocksize; 729 u32 blocksize;
730 730
731 buf = sg_virt(&sg[0]); 731 buf = sg_virt(&sg[0]);
732 if (!(buf)) { 732 if (!buf) {
733 printk(KERN_ERR "Unable to get buf for scatterlist\n"); 733 pr_err("Unable to get buf for scatterlist\n");
734 goto after_mode_select; 734 goto after_mode_select;
735 } 735 }
736 736
@@ -760,34 +760,20 @@ after_mode_select:
760} 760}
761 761
762static struct se_task * 762static struct se_task *
763pscsi_alloc_task(struct se_cmd *cmd) 763pscsi_alloc_task(unsigned char *cdb)
764{ 764{
765 struct pscsi_plugin_task *pt; 765 struct pscsi_plugin_task *pt;
766 unsigned char *cdb = cmd->t_task_cdb;
767 766
768 pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); 767 /*
768 * Dynamically alloc cdb space, since it may be larger than
769 * TCM_MAX_COMMAND_SIZE
770 */
771 pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
769 if (!pt) { 772 if (!pt) {
770 printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n"); 773 pr_err("Unable to allocate struct pscsi_plugin_task\n");
771 return NULL; 774 return NULL;
772 } 775 }
773 776
774 /*
775 * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
776 * allocate the extended CDB buffer for per struct se_task context
777 * pt->pscsi_cdb now.
778 */
779 if (cmd->t_task_cdb != cmd->__t_task_cdb) {
780
781 pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
782 if (!(pt->pscsi_cdb)) {
783 printk(KERN_ERR "pSCSI: Unable to allocate extended"
784 " pt->pscsi_cdb\n");
785 kfree(pt);
786 return NULL;
787 }
788 } else
789 pt->pscsi_cdb = &pt->__pscsi_cdb[0];
790
791 return &pt->pscsi_task; 777 return &pt->pscsi_task;
792} 778}
793 779
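/*
 * A userspace model (not from this patch) of the single-allocation
 * idiom the new pscsi_alloc_task() uses: the CDB becomes a trailing
 * flexible array member of the task struct (see the pscsi_cdb[0]
 * member added to struct pscsi_plugin_task in target_core_pscsi.h
 * below), so one kzalloc() of sizeof(*pt) + scsi_command_size(cdb)
 * covers both, and the single kfree() in pscsi_free_task() releases
 * both; there is no longer a separate extended-CDB buffer to track.
 */
#include <stdlib.h>
#include <string.h>

struct demo_task {
	int result;
	unsigned char cdb[];	/* flexible array; sizeof() excludes it */
};

static struct demo_task *demo_alloc_task(const unsigned char *cdb,
					 size_t cdb_len)
{
	struct demo_task *t = calloc(1, sizeof(*t) + cdb_len);

	if (!t)
		return NULL;
	memcpy(t->cdb, cdb, cdb_len);	/* CDB bytes sit right after the struct */
	return t;
}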
@@ -837,8 +823,8 @@ static int pscsi_blk_get_request(struct se_task *task)
837 pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, 823 pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
838 (task->task_data_direction == DMA_TO_DEVICE), 824 (task->task_data_direction == DMA_TO_DEVICE),
839 GFP_KERNEL); 825 GFP_KERNEL);
840 if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) { 826 if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) {
841 printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n", 827 pr_err("PSCSI: blk_get_request() failed: %ld\n",
842 IS_ERR(pt->pscsi_req)); 828 IS_ERR(pt->pscsi_req));
843 return PYX_TRANSPORT_LU_COMM_FAILURE; 829 return PYX_TRANSPORT_LU_COMM_FAILURE;
844 } 830 }
@@ -883,15 +869,8 @@ static int pscsi_do_task(struct se_task *task)
883static void pscsi_free_task(struct se_task *task) 869static void pscsi_free_task(struct se_task *task)
884{ 870{
885 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 871 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
886 struct se_cmd *cmd = task->task_se_cmd;
887 872
888 /* 873 /*
889 * Release the extended CDB allocation from pscsi_alloc_task()
890 * if one exists.
891 */
892 if (cmd->t_task_cdb != cmd->__t_task_cdb)
893 kfree(pt->pscsi_cdb);
894 /*
895 * We do not release the bio(s) here associated with this task, as 874 * We do not release the bio(s) here associated with this task, as
896 * this is handled by bio_put() and pscsi_bi_endio(). 875 * this is handled by bio_put() and pscsi_bi_endio().
897 */ 876 */
@@ -936,7 +915,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
936 switch (token) { 915 switch (token) {
937 case Opt_scsi_host_id: 916 case Opt_scsi_host_id:
938 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { 917 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
939 printk(KERN_ERR "PSCSI[%d]: Unable to accept" 918 pr_err("PSCSI[%d]: Unable to accept"
940 " scsi_host_id while phv_mode ==" 919 " scsi_host_id while phv_mode =="
941 " PHV_LLD_SCSI_HOST_NO\n", 920 " PHV_LLD_SCSI_HOST_NO\n",
942 phv->phv_host_id); 921 phv->phv_host_id);
@@ -945,14 +924,14 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
945 } 924 }
946 match_int(args, &arg); 925 match_int(args, &arg);
947 pdv->pdv_host_id = arg; 926 pdv->pdv_host_id = arg;
948 printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:" 927 pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
949 " %d\n", phv->phv_host_id, pdv->pdv_host_id); 928 " %d\n", phv->phv_host_id, pdv->pdv_host_id);
950 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; 929 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
951 break; 930 break;
952 case Opt_scsi_channel_id: 931 case Opt_scsi_channel_id:
953 match_int(args, &arg); 932 match_int(args, &arg);
954 pdv->pdv_channel_id = arg; 933 pdv->pdv_channel_id = arg;
955 printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel" 934 pr_debug("PSCSI[%d]: Referencing SCSI Channel"
956 " ID: %d\n", phv->phv_host_id, 935 " ID: %d\n", phv->phv_host_id,
957 pdv->pdv_channel_id); 936 pdv->pdv_channel_id);
958 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; 937 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
@@ -960,7 +939,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
960 case Opt_scsi_target_id: 939 case Opt_scsi_target_id:
961 match_int(args, &arg); 940 match_int(args, &arg);
962 pdv->pdv_target_id = arg; 941 pdv->pdv_target_id = arg;
963 printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target" 942 pr_debug("PSCSI[%d]: Referencing SCSI Target"
964 " ID: %d\n", phv->phv_host_id, 943 " ID: %d\n", phv->phv_host_id,
965 pdv->pdv_target_id); 944 pdv->pdv_target_id);
966 pdv->pdv_flags |= PDF_HAS_TARGET_ID; 945 pdv->pdv_flags |= PDF_HAS_TARGET_ID;
@@ -968,7 +947,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
968 case Opt_scsi_lun_id: 947 case Opt_scsi_lun_id:
969 match_int(args, &arg); 948 match_int(args, &arg);
970 pdv->pdv_lun_id = arg; 949 pdv->pdv_lun_id = arg;
971 printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:" 950 pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
972 " %d\n", phv->phv_host_id, pdv->pdv_lun_id); 951 " %d\n", phv->phv_host_id, pdv->pdv_lun_id);
973 pdv->pdv_flags |= PDF_HAS_LUN_ID; 952 pdv->pdv_flags |= PDF_HAS_LUN_ID;
974 break; 953 break;
@@ -991,7 +970,7 @@ static ssize_t pscsi_check_configfs_dev_params(
991 if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || 970 if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
992 !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || 971 !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
993 !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { 972 !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
994 printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" 973 pr_err("Missing scsi_channel_id=, scsi_target_id= and"
995 " scsi_lun_id= parameters\n"); 974 " scsi_lun_id= parameters\n");
996 return -EINVAL; 975 return -EINVAL;
997 } 976 }
@@ -1061,8 +1040,8 @@ static inline struct bio *pscsi_get_bio(int sg_num)
1061 * in block/blk-core.c:blk_make_request() 1040 * in block/blk-core.c:blk_make_request()
1062 */ 1041 */
1063 bio = bio_kmalloc(GFP_KERNEL, sg_num); 1042 bio = bio_kmalloc(GFP_KERNEL, sg_num);
1064 if (!(bio)) { 1043 if (!bio) {
1065 printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n"); 1044 pr_err("PSCSI: bio_kmalloc() failed\n");
1066 return NULL; 1045 return NULL;
1067 } 1046 }
1068 bio->bi_end_io = pscsi_bi_endio; 1047 bio->bi_end_io = pscsi_bi_endio;
@@ -1070,12 +1049,6 @@ static inline struct bio *pscsi_get_bio(int sg_num)
1070 return bio; 1049 return bio;
1071} 1050}
1072 1051
1073#if 0
1074#define DEBUG_PSCSI(x...) printk(x)
1075#else
1076#define DEBUG_PSCSI(x...)
1077#endif
1078
1079static int __pscsi_map_task_SG( 1052static int __pscsi_map_task_SG(
1080 struct se_task *task, 1053 struct se_task *task,
1081 struct scatterlist *task_sg, 1054 struct scatterlist *task_sg,
@@ -1106,34 +1079,34 @@ static int __pscsi_map_task_SG(
 1107 * is ported to upstream SCSI passthrough functionality that accepts 1080 * struct scatterlist->page_link or struct page as a parameter.
 1108 * struct scatterlist->page_link or struct page as a parameter. 1081 */
1108 */ 1081 */
1109 DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages); 1082 pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
1110 1083
1111 for_each_sg(task_sg, sg, task_sg_num, i) { 1084 for_each_sg(task_sg, sg, task_sg_num, i) {
1112 page = sg_page(sg); 1085 page = sg_page(sg);
1113 off = sg->offset; 1086 off = sg->offset;
1114 len = sg->length; 1087 len = sg->length;
1115 1088
1116 DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i, 1089 pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
1117 page, len, off); 1090 page, len, off);
1118 1091
1119 while (len > 0 && data_len > 0) { 1092 while (len > 0 && data_len > 0) {
1120 bytes = min_t(unsigned int, len, PAGE_SIZE - off); 1093 bytes = min_t(unsigned int, len, PAGE_SIZE - off);
1121 bytes = min(bytes, data_len); 1094 bytes = min(bytes, data_len);
1122 1095
1123 if (!(bio)) { 1096 if (!bio) {
1124 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); 1097 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
1125 nr_pages -= nr_vecs; 1098 nr_pages -= nr_vecs;
1126 /* 1099 /*
1127 * Calls bio_kmalloc() and sets bio->bi_end_io() 1100 * Calls bio_kmalloc() and sets bio->bi_end_io()
1128 */ 1101 */
1129 bio = pscsi_get_bio(nr_vecs); 1102 bio = pscsi_get_bio(nr_vecs);
1130 if (!(bio)) 1103 if (!bio)
1131 goto fail; 1104 goto fail;
1132 1105
1133 if (rw) 1106 if (rw)
1134 bio->bi_rw |= REQ_WRITE; 1107 bio->bi_rw |= REQ_WRITE;
1135 1108
1136 DEBUG_PSCSI("PSCSI: Allocated bio: %p," 1109 pr_debug("PSCSI: Allocated bio: %p,"
1137 " dir: %s nr_vecs: %d\n", bio, 1110 " dir: %s nr_vecs: %d\n", bio,
1138 (rw) ? "rw" : "r", nr_vecs); 1111 (rw) ? "rw" : "r", nr_vecs);
1139 /* 1112 /*
@@ -1148,7 +1121,7 @@ static int __pscsi_map_task_SG(
1148 tbio = tbio->bi_next = bio; 1121 tbio = tbio->bi_next = bio;
1149 } 1122 }
1150 1123
1151 DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d" 1124 pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
1152 " bio: %p page: %p len: %d off: %d\n", i, bio, 1125 " bio: %p page: %p len: %d off: %d\n", i, bio,
1153 page, len, off); 1126 page, len, off);
1154 1127
@@ -1157,11 +1130,11 @@ static int __pscsi_map_task_SG(
1157 if (rc != bytes) 1130 if (rc != bytes)
1158 goto fail; 1131 goto fail;
1159 1132
1160 DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", 1133 pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
1161 bio->bi_vcnt, nr_vecs); 1134 bio->bi_vcnt, nr_vecs);
1162 1135
1163 if (bio->bi_vcnt > nr_vecs) { 1136 if (bio->bi_vcnt > nr_vecs) {
1164 DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:" 1137 pr_debug("PSCSI: Reached bio->bi_vcnt max:"
1165 " %d i: %d bio: %p, allocating another" 1138 " %d i: %d bio: %p, allocating another"
1166 " bio\n", bio->bi_vcnt, i, bio); 1139 " bio\n", bio->bi_vcnt, i, bio);
1167 /* 1140 /*
@@ -1183,15 +1156,15 @@ static int __pscsi_map_task_SG(
1183 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND 1156 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
 1184 * primary SCSI WRITE payload mapped for struct se_task->task_sg[] 1157 * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
1185 */ 1158 */
1186 if (!(bidi_read)) { 1159 if (!bidi_read) {
1187 /* 1160 /*
1188 * Starting with v2.6.31, call blk_make_request() passing in *hbio to 1161 * Starting with v2.6.31, call blk_make_request() passing in *hbio to
1189 * allocate the pSCSI task a struct request. 1162 * allocate the pSCSI task a struct request.
1190 */ 1163 */
1191 pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, 1164 pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
1192 hbio, GFP_KERNEL); 1165 hbio, GFP_KERNEL);
1193 if (!(pt->pscsi_req)) { 1166 if (!pt->pscsi_req) {
1194 printk(KERN_ERR "pSCSI: blk_make_request() failed\n"); 1167 pr_err("pSCSI: blk_make_request() failed\n");
1195 goto fail; 1168 goto fail;
1196 } 1169 }
1197 /* 1170 /*
@@ -1200,7 +1173,7 @@ static int __pscsi_map_task_SG(
1200 */ 1173 */
1201 pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); 1174 pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
1202 1175
1203 return task->task_sg_num; 1176 return task->task_sg_nents;
1204 } 1177 }
1205 /* 1178 /*
1206 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND 1179 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
@@ -1208,13 +1181,13 @@ static int __pscsi_map_task_SG(
1208 */ 1181 */
1209 pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, 1182 pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
1210 hbio, GFP_KERNEL); 1183 hbio, GFP_KERNEL);
1211 if (!(pt->pscsi_req->next_rq)) { 1184 if (!pt->pscsi_req->next_rq) {
1212 printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n"); 1185 pr_err("pSCSI: blk_make_request() failed for BIDI\n");
1213 goto fail; 1186 goto fail;
1214 } 1187 }
1215 pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); 1188 pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
1216 1189
1217 return task->task_sg_num; 1190 return task->task_sg_nents;
1218fail: 1191fail:
1219 while (hbio) { 1192 while (hbio) {
1220 bio = hbio; 1193 bio = hbio;
@@ -1233,14 +1206,14 @@ static int pscsi_map_task_SG(struct se_task *task)
1233 * Setup the main struct request for the task->task_sg[] payload 1206 * Setup the main struct request for the task->task_sg[] payload
1234 */ 1207 */
1235 1208
1236 ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0); 1209 ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_nents, 0);
1237 if (ret >= 0 && task->task_sg_bidi) { 1210 if (ret >= 0 && task->task_sg_bidi) {
1238 /* 1211 /*
1239 * If present, set up the extra BIDI-COMMAND SCSI READ 1212 * If present, set up the extra BIDI-COMMAND SCSI READ
1240 * struct request and payload. 1213 * struct request and payload.
1241 */ 1214 */
1242 ret = __pscsi_map_task_SG(task, task->task_sg_bidi, 1215 ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
1243 task->task_sg_num, 1); 1216 task->task_sg_nents, 1);
1244 } 1217 }
1245 1218
1246 if (ret < 0) 1219 if (ret < 0)
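/*
 * The __pscsi_map_task_SG() loop above packs scatterlist pages into a
 * chain of bios, starting a fresh bio (capped at BIO_MAX_PAGES
 * vectors) whenever the current one fills. A stripped-down, runnable
 * userspace model of that chunking structure, with hypothetical sizes:
 */
#include <stdio.h>

#define DEMO_MAX_VECS 4		/* stand-in for BIO_MAX_PAGES */

int main(void)
{
	int nr_pages = 10, container = 0, used = 0, i;

	for (i = 0; i < nr_pages; i++) {
		if (used == 0)
			printf("alloc container %d (cap %d)\n", ++container,
			       DEMO_MAX_VECS);
		printf("  add page %d\n", i);
		if (++used == DEMO_MAX_VECS)
			used = 0;	/* full; next page opens a new one */
	}
	return 0;
}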
@@ -1319,9 +1292,9 @@ static inline void pscsi_process_SAM_status(
1319 struct pscsi_plugin_task *pt) 1292 struct pscsi_plugin_task *pt)
1320{ 1293{
1321 task->task_scsi_status = status_byte(pt->pscsi_result); 1294 task->task_scsi_status = status_byte(pt->pscsi_result);
1322 if ((task->task_scsi_status)) { 1295 if (task->task_scsi_status) {
1323 task->task_scsi_status <<= 1; 1296 task->task_scsi_status <<= 1;
1324 printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:" 1297 pr_debug("PSCSI Status Byte exception at task: %p CDB:"
1325 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1298 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
1326 pt->pscsi_result); 1299 pt->pscsi_result);
1327 } 1300 }
@@ -1331,7 +1304,7 @@ static inline void pscsi_process_SAM_status(
1331 transport_complete_task(task, (!task->task_scsi_status)); 1304 transport_complete_task(task, (!task->task_scsi_status));
1332 break; 1305 break;
1333 default: 1306 default:
1334 printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:" 1307 pr_debug("PSCSI Host Byte exception at task: %p CDB:"
1335 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1308 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
1336 pt->pscsi_result); 1309 pt->pscsi_result);
1337 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 1310 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
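/*
 * Why pscsi_process_SAM_status() shifts left by one: Linux's
 * historical status_byte() macro stores the SCSI status shifted right
 * a bit (status_byte(result) == ((result) >> 1) & 0x7f), so a CHECK
 * CONDITION result yields 0x01 and "<<= 1" restores the SAM value
 * 0x02. A tiny standalone check of that round trip, with local
 * stand-ins for the kernel macros:
 */
#include <assert.h>

#define DEMO_STATUS_BYTE(result)	(((result) >> 1) & 0x7f)
#define DEMO_SAM_STAT_CHECK_CONDITION	0x02

int main(void)
{
	unsigned int result = DEMO_SAM_STAT_CHECK_CONDITION; /* low byte = status */
	unsigned int st = DEMO_STATUS_BYTE(result);	     /* 0x01 */

	assert((st << 1) == DEMO_SAM_STAT_CHECK_CONDITION);
	return 0;
}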
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 280b689379c3..ebf4f1ae2c83 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -23,13 +23,12 @@
23 23
24struct pscsi_plugin_task { 24struct pscsi_plugin_task {
25 struct se_task pscsi_task; 25 struct se_task pscsi_task;
26 unsigned char *pscsi_cdb;
27 unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
28 unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; 26 unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
29 int pscsi_direction; 27 int pscsi_direction;
30 int pscsi_result; 28 int pscsi_result;
31 u32 pscsi_resid; 29 u32 pscsi_resid;
32 struct request *pscsi_req; 30 struct request *pscsi_req;
31 unsigned char pscsi_cdb[0];
33} ____cacheline_aligned; 32} ____cacheline_aligned;
34 33
35#define PDF_HAS_CHANNEL_ID 0x01 34#define PDF_HAS_CHANNEL_ID 0x01
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4f9416d5c028..3dd81d24d9a9 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -44,12 +44,8 @@
44 44
45#include "target_core_rd.h" 45#include "target_core_rd.h"
46 46
47static struct se_subsystem_api rd_dr_template;
48static struct se_subsystem_api rd_mcp_template; 47static struct se_subsystem_api rd_mcp_template;
49 48
50/* #define DEBUG_RAMDISK_MCP */
51/* #define DEBUG_RAMDISK_DR */
52
53/* rd_attach_hba(): (Part of se_subsystem_api_t template) 49/* rd_attach_hba(): (Part of se_subsystem_api_t template)
54 * 50 *
55 * 51 *
@@ -59,8 +55,8 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
59 struct rd_host *rd_host; 55 struct rd_host *rd_host;
60 56
61 rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL); 57 rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
62 if (!(rd_host)) { 58 if (!rd_host) {
63 printk(KERN_ERR "Unable to allocate memory for struct rd_host\n"); 59 pr_err("Unable to allocate memory for struct rd_host\n");
64 return -ENOMEM; 60 return -ENOMEM;
65 } 61 }
66 62
@@ -68,10 +64,10 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
68 64
69 hba->hba_ptr = rd_host; 65 hba->hba_ptr = rd_host;
70 66
71 printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" 67 pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
72 " Generic Target Core Stack %s\n", hba->hba_id, 68 " Generic Target Core Stack %s\n", hba->hba_id,
73 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); 69 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
74 printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" 70 pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
75 " MaxSectors: %u\n", hba->hba_id, 71 " MaxSectors: %u\n", hba->hba_id,
76 rd_host->rd_host_id, RD_MAX_SECTORS); 72 rd_host->rd_host_id, RD_MAX_SECTORS);
77 73
@@ -82,7 +78,7 @@ static void rd_detach_hba(struct se_hba *hba)
82{ 78{
83 struct rd_host *rd_host = hba->hba_ptr; 79 struct rd_host *rd_host = hba->hba_ptr;
84 80
85 printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from" 81 pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
86 " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); 82 " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
87 83
88 kfree(rd_host); 84 kfree(rd_host);
@@ -111,7 +107,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
111 107
112 for (j = 0; j < sg_per_table; j++) { 108 for (j = 0; j < sg_per_table; j++) {
113 pg = sg_page(&sg[j]); 109 pg = sg_page(&sg[j]);
114 if ((pg)) { 110 if (pg) {
115 __free_page(pg); 111 __free_page(pg);
116 page_count++; 112 page_count++;
117 } 113 }
@@ -120,7 +116,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
120 kfree(sg); 116 kfree(sg);
121 } 117 }
122 118
123 printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk" 119 pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
124 " Device ID: %u, pages %u in %u tables total bytes %lu\n", 120 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
125 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, 121 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
126 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); 122 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
@@ -145,7 +141,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
145 struct scatterlist *sg; 141 struct scatterlist *sg;
146 142
147 if (rd_dev->rd_page_count <= 0) { 143 if (rd_dev->rd_page_count <= 0) {
148 printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", 144 pr_err("Illegal page count: %u for Ramdisk device\n",
149 rd_dev->rd_page_count); 145 rd_dev->rd_page_count);
150 return -EINVAL; 146 return -EINVAL;
151 } 147 }
@@ -154,8 +150,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
154 sg_tables = (total_sg_needed / max_sg_per_table) + 1; 150 sg_tables = (total_sg_needed / max_sg_per_table) + 1;
155 151
156 sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); 152 sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
157 if (!(sg_table)) { 153 if (!sg_table) {
158 printk(KERN_ERR "Unable to allocate memory for Ramdisk" 154 pr_err("Unable to allocate memory for Ramdisk"
159 " scatterlist tables\n"); 155 " scatterlist tables\n");
160 return -ENOMEM; 156 return -ENOMEM;
161 } 157 }
@@ -169,13 +165,13 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
169 165
170 sg = kzalloc(sg_per_table * sizeof(struct scatterlist), 166 sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
171 GFP_KERNEL); 167 GFP_KERNEL);
172 if (!(sg)) { 168 if (!sg) {
173 printk(KERN_ERR "Unable to allocate scatterlist array" 169 pr_err("Unable to allocate scatterlist array"
174 " for struct rd_dev\n"); 170 " for struct rd_dev\n");
175 return -ENOMEM; 171 return -ENOMEM;
176 } 172 }
177 173
178 sg_init_table((struct scatterlist *)&sg[0], sg_per_table); 174 sg_init_table(sg, sg_per_table);
179 175
180 sg_table[i].sg_table = sg; 176 sg_table[i].sg_table = sg;
181 sg_table[i].rd_sg_count = sg_per_table; 177 sg_table[i].rd_sg_count = sg_per_table;
@@ -185,8 +181,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
185 181
186 for (j = 0; j < sg_per_table; j++) { 182 for (j = 0; j < sg_per_table; j++) {
187 pg = alloc_pages(GFP_KERNEL, 0); 183 pg = alloc_pages(GFP_KERNEL, 0);
188 if (!(pg)) { 184 if (!pg) {
189 printk(KERN_ERR "Unable to allocate scatterlist" 185 pr_err("Unable to allocate scatterlist"
190 " pages for struct rd_dev_sg_table\n"); 186 " pages for struct rd_dev_sg_table\n");
191 return -ENOMEM; 187 return -ENOMEM;
192 } 188 }
@@ -198,7 +194,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
198 total_sg_needed -= sg_per_table; 194 total_sg_needed -= sg_per_table;
199 } 195 }
200 196
201 printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of" 197 pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
202 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, 198 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
203 rd_dev->rd_dev_id, rd_dev->rd_page_count, 199 rd_dev->rd_dev_id, rd_dev->rd_page_count,
204 rd_dev->sg_table_count); 200 rd_dev->sg_table_count);
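/*
 * A worked model of the sizing arithmetic in rd_build_device_space()
 * above. With a hypothetical rd_page_count of 10000 and a
 * max_sg_per_table of 4096 entries, (10000 / 4096) + 1 = 3 tables are
 * allocated: two full ones and one carrying the 1808-entry remainder,
 * matching the driver's while (total_sg_needed) loop:
 */
#include <stdio.h>

int main(void)
{
	unsigned int total = 10000, max_per_table = 4096;
	unsigned int tables = total / max_per_table + 1; /* driver's rounding */
	unsigned int i;

	printf("%u tables\n", tables);
	for (i = 0; i < tables && total; i++) {
		unsigned int n = total > max_per_table ? max_per_table : total;

		printf("table %u: %u sg entries\n", i, n);
		total -= n;
	}
	return 0;
}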
@@ -215,8 +211,8 @@ static void *rd_allocate_virtdevice(
215 struct rd_host *rd_host = hba->hba_ptr; 211 struct rd_host *rd_host = hba->hba_ptr;
216 212
217 rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL); 213 rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
218 if (!(rd_dev)) { 214 if (!rd_dev) {
219 printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n"); 215 pr_err("Unable to allocate memory for struct rd_dev\n");
220 return NULL; 216 return NULL;
221 } 217 }
222 218
@@ -226,11 +222,6 @@ static void *rd_allocate_virtdevice(
226 return rd_dev; 222 return rd_dev;
227} 223}
228 224
229static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
230{
231 return rd_allocate_virtdevice(hba, name, 1);
232}
233
234static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) 225static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
235{ 226{
236 return rd_allocate_virtdevice(hba, name, 0); 227 return rd_allocate_virtdevice(hba, name, 0);
@@ -270,16 +261,15 @@ static struct se_device *rd_create_virtdevice(
270 dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; 261 dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
271 262
272 dev = transport_add_device_to_core_hba(hba, 263 dev = transport_add_device_to_core_hba(hba,
273 (rd_dev->rd_direct) ? &rd_dr_template :
274 &rd_mcp_template, se_dev, dev_flags, rd_dev, 264 &rd_mcp_template, se_dev, dev_flags, rd_dev,
275 &dev_limits, prod, rev); 265 &dev_limits, prod, rev);
276 if (!(dev)) 266 if (!dev)
277 goto fail; 267 goto fail;
278 268
279 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 269 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
280 rd_dev->rd_queue_depth = dev->queue_depth; 270 rd_dev->rd_queue_depth = dev->queue_depth;
281 271
282 printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" 272 pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
283 " %u pages in %u tables, %lu total bytes\n", 273 " %u pages in %u tables, %lu total bytes\n",
284 rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : 274 rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
285 "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, 275 "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
@@ -293,14 +283,6 @@ fail:
293 return ERR_PTR(ret); 283 return ERR_PTR(ret);
294} 284}
295 285
296static struct se_device *rd_DIRECT_create_virtdevice(
297 struct se_hba *hba,
298 struct se_subsystem_dev *se_dev,
299 void *p)
300{
301 return rd_create_virtdevice(hba, se_dev, p, 1);
302}
303
304static struct se_device *rd_MEMCPY_create_virtdevice( 286static struct se_device *rd_MEMCPY_create_virtdevice(
305 struct se_hba *hba, 287 struct se_hba *hba,
306 struct se_subsystem_dev *se_dev, 288 struct se_subsystem_dev *se_dev,
@@ -327,16 +309,15 @@ static inline struct rd_request *RD_REQ(struct se_task *task)
327} 309}
328 310
329static struct se_task * 311static struct se_task *
330rd_alloc_task(struct se_cmd *cmd) 312rd_alloc_task(unsigned char *cdb)
331{ 313{
332 struct rd_request *rd_req; 314 struct rd_request *rd_req;
333 315
334 rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); 316 rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
335 if (!rd_req) { 317 if (!rd_req) {
336 printk(KERN_ERR "Unable to allocate struct rd_request\n"); 318 pr_err("Unable to allocate struct rd_request\n");
337 return NULL; 319 return NULL;
338 } 320 }
339 rd_req->rd_dev = cmd->se_dev->dev_ptr;
340 321
341 return &rd_req->rd_task; 322 return &rd_req->rd_task;
342} 323}
@@ -357,7 +338,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
357 return sg_table; 338 return sg_table;
358 } 339 }
359 340
360 printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n", 341 pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
361 page); 342 page);
362 343
363 return NULL; 344 return NULL;
@@ -370,7 +351,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
370static int rd_MEMCPY_read(struct rd_request *req) 351static int rd_MEMCPY_read(struct rd_request *req)
371{ 352{
372 struct se_task *task = &req->rd_task; 353 struct se_task *task = &req->rd_task;
373 struct rd_dev *dev = req->rd_dev; 354 struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
374 struct rd_dev_sg_table *table; 355 struct rd_dev_sg_table *table;
375 struct scatterlist *sg_d, *sg_s; 356 struct scatterlist *sg_d, *sg_s;
376 void *dst, *src; 357 void *dst, *src;
@@ -379,32 +360,32 @@ static int rd_MEMCPY_read(struct rd_request *req)
379 u32 rd_offset = req->rd_offset; 360 u32 rd_offset = req->rd_offset;
380 361
381 table = rd_get_sg_table(dev, req->rd_page); 362 table = rd_get_sg_table(dev, req->rd_page);
382 if (!(table)) 363 if (!table)
383 return -EINVAL; 364 return -EINVAL;
384 365
385 table_sg_end = (table->page_end_offset - req->rd_page); 366 table_sg_end = (table->page_end_offset - req->rd_page);
386 sg_d = task->task_sg; 367 sg_d = task->task_sg;
387 sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; 368 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
388#ifdef DEBUG_RAMDISK_MCP 369
389 printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" 370 pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
390 " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, 371 " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
391 req->rd_page, req->rd_offset); 372 req->rd_page, req->rd_offset);
392#endif 373
393 src_offset = rd_offset; 374 src_offset = rd_offset;
394 375
395 while (req->rd_size) { 376 while (req->rd_size) {
396 if ((sg_d[i].length - dst_offset) < 377 if ((sg_d[i].length - dst_offset) <
397 (sg_s[j].length - src_offset)) { 378 (sg_s[j].length - src_offset)) {
398 length = (sg_d[i].length - dst_offset); 379 length = (sg_d[i].length - dst_offset);
399#ifdef DEBUG_RAMDISK_MCP 380
400 printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d" 381 pr_debug("Step 1 - sg_d[%d]: %p length: %d"
401 " offset: %u sg_s[%d].length: %u\n", i, 382 " offset: %u sg_s[%d].length: %u\n", i,
402 &sg_d[i], sg_d[i].length, sg_d[i].offset, j, 383 &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
403 sg_s[j].length); 384 sg_s[j].length);
404 printk(KERN_INFO "Step 1 - length: %u dst_offset: %u" 385 pr_debug("Step 1 - length: %u dst_offset: %u"
405 " src_offset: %u\n", length, dst_offset, 386 " src_offset: %u\n", length, dst_offset,
406 src_offset); 387 src_offset);
407#endif 388
408 if (length > req->rd_size) 389 if (length > req->rd_size)
409 length = req->rd_size; 390 length = req->rd_size;
410 391
@@ -421,15 +402,15 @@ static int rd_MEMCPY_read(struct rd_request *req)
421 page_end = 0; 402 page_end = 0;
422 } else { 403 } else {
423 length = (sg_s[j].length - src_offset); 404 length = (sg_s[j].length - src_offset);
424#ifdef DEBUG_RAMDISK_MCP 405
425 printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d" 406 pr_debug("Step 2 - sg_d[%d]: %p length: %d"
426 " offset: %u sg_s[%d].length: %u\n", i, 407 " offset: %u sg_s[%d].length: %u\n", i,
427 &sg_d[i], sg_d[i].length, sg_d[i].offset, 408 &sg_d[i], sg_d[i].length, sg_d[i].offset,
428 j, sg_s[j].length); 409 j, sg_s[j].length);
429 printk(KERN_INFO "Step 2 - length: %u dst_offset: %u" 410 pr_debug("Step 2 - length: %u dst_offset: %u"
430 " src_offset: %u\n", length, dst_offset, 411 " src_offset: %u\n", length, dst_offset,
431 src_offset); 412 src_offset);
432#endif 413
433 if (length > req->rd_size) 414 if (length > req->rd_size)
434 length = req->rd_size; 415 length = req->rd_size;
435 416
@@ -453,31 +434,28 @@ static int rd_MEMCPY_read(struct rd_request *req)
453 434
454 memcpy(dst, src, length); 435 memcpy(dst, src, length);
455 436
456#ifdef DEBUG_RAMDISK_MCP 437 pr_debug("page: %u, remaining size: %u, length: %u,"
457 printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
458 " i: %u, j: %u\n", req->rd_page, 438 " i: %u, j: %u\n", req->rd_page,
459 (req->rd_size - length), length, i, j); 439 (req->rd_size - length), length, i, j);
460#endif 440
461 req->rd_size -= length; 441 req->rd_size -= length;
462 if (!(req->rd_size)) 442 if (!req->rd_size)
463 return 0; 443 return 0;
464 444
465 if (!page_end) 445 if (!page_end)
466 continue; 446 continue;
467 447
468 if (++req->rd_page <= table->page_end_offset) { 448 if (++req->rd_page <= table->page_end_offset) {
469#ifdef DEBUG_RAMDISK_MCP 449 pr_debug("page: %u in same page table\n",
470 printk(KERN_INFO "page: %u in same page table\n",
471 req->rd_page); 450 req->rd_page);
472#endif
473 continue; 451 continue;
474 } 452 }
475#ifdef DEBUG_RAMDISK_MCP 453
476 printk(KERN_INFO "getting new page table for page: %u\n", 454 pr_debug("getting new page table for page: %u\n",
477 req->rd_page); 455 req->rd_page);
478#endif 456
479 table = rd_get_sg_table(dev, req->rd_page); 457 table = rd_get_sg_table(dev, req->rd_page);
480 if (!(table)) 458 if (!table)
481 return -EINVAL; 459 return -EINVAL;
482 460
483 sg_s = &table->sg_table[j = 0]; 461 sg_s = &table->sg_table[j = 0];
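/*
 * rd_MEMCPY_read() above (and the symmetric rd_MEMCPY_write() below)
 * walks two scatterlists with independent cursors, copying
 * min(space left in the destination segment, data left in the source
 * segment) each step and advancing whichever cursor drained. A compact
 * userspace model of that two-cursor copy over plain buffer segments:
 */
#include <stdio.h>
#include <string.h>

struct seg { char *buf; unsigned int len; };

static void copy_segs(struct seg *dst, struct seg *src, unsigned int size)
{
	unsigned int i = 0, j = 0, doff = 0, soff = 0;

	while (size) {
		unsigned int n = dst[i].len - doff < src[j].len - soff ?
				 dst[i].len - doff : src[j].len - soff;

		if (n > size)
			n = size;
		memcpy(dst[i].buf + doff, src[j].buf + soff, n);
		size -= n;
		doff += n;
		soff += n;
		if (doff == dst[i].len) {	/* destination segment drained */
			i++;
			doff = 0;
		}
		if (soff == src[j].len) {	/* source segment drained */
			j++;
			soff = 0;
		}
	}
}

int main(void)
{
	char a[3] = "abc", b[5] = "defgh", d1[4], d2[4];
	struct seg src[] = { { a, 3 }, { b, 5 } };
	struct seg dst[] = { { d1, 4 }, { d2, 4 } };

	copy_segs(dst, src, 8);
	printf("%.4s%.4s\n", d1, d2);	/* prints: abcdefgh */
	return 0;
}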
@@ -493,7 +471,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
493static int rd_MEMCPY_write(struct rd_request *req) 471static int rd_MEMCPY_write(struct rd_request *req)
494{ 472{
495 struct se_task *task = &req->rd_task; 473 struct se_task *task = &req->rd_task;
496 struct rd_dev *dev = req->rd_dev; 474 struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
497 struct rd_dev_sg_table *table; 475 struct rd_dev_sg_table *table;
498 struct scatterlist *sg_d, *sg_s; 476 struct scatterlist *sg_d, *sg_s;
499 void *dst, *src; 477 void *dst, *src;
@@ -502,32 +480,32 @@ static int rd_MEMCPY_write(struct rd_request *req)
502 u32 rd_offset = req->rd_offset; 480 u32 rd_offset = req->rd_offset;
503 481
504 table = rd_get_sg_table(dev, req->rd_page); 482 table = rd_get_sg_table(dev, req->rd_page);
505 if (!(table)) 483 if (!table)
506 return -EINVAL; 484 return -EINVAL;
507 485
508 table_sg_end = (table->page_end_offset - req->rd_page); 486 table_sg_end = (table->page_end_offset - req->rd_page);
509 sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; 487 sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
510 sg_s = task->task_sg; 488 sg_s = task->task_sg;
511#ifdef DEBUG_RAMDISK_MCP 489
512 printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u," 490 pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
513 " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, 491 " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
514 req->rd_page, req->rd_offset); 492 req->rd_page, req->rd_offset);
515#endif 493
516 dst_offset = rd_offset; 494 dst_offset = rd_offset;
517 495
518 while (req->rd_size) { 496 while (req->rd_size) {
519 if ((sg_s[i].length - src_offset) < 497 if ((sg_s[i].length - src_offset) <
520 (sg_d[j].length - dst_offset)) { 498 (sg_d[j].length - dst_offset)) {
521 length = (sg_s[i].length - src_offset); 499 length = (sg_s[i].length - src_offset);
522#ifdef DEBUG_RAMDISK_MCP 500
523 printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d" 501 pr_debug("Step 1 - sg_s[%d]: %p length: %d"
524 " offset: %d sg_d[%d].length: %u\n", i, 502 " offset: %d sg_d[%d].length: %u\n", i,
525 &sg_s[i], sg_s[i].length, sg_s[i].offset, 503 &sg_s[i], sg_s[i].length, sg_s[i].offset,
526 j, sg_d[j].length); 504 j, sg_d[j].length);
527 printk(KERN_INFO "Step 1 - length: %u src_offset: %u" 505 pr_debug("Step 1 - length: %u src_offset: %u"
528 " dst_offset: %u\n", length, src_offset, 506 " dst_offset: %u\n", length, src_offset,
529 dst_offset); 507 dst_offset);
530#endif 508
531 if (length > req->rd_size) 509 if (length > req->rd_size)
532 length = req->rd_size; 510 length = req->rd_size;
533 511
@@ -544,15 +522,15 @@ static int rd_MEMCPY_write(struct rd_request *req)
544 page_end = 0; 522 page_end = 0;
545 } else { 523 } else {
546 length = (sg_d[j].length - dst_offset); 524 length = (sg_d[j].length - dst_offset);
547#ifdef DEBUG_RAMDISK_MCP 525
548 printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d" 526 pr_debug("Step 2 - sg_s[%d]: %p length: %d"
549 " offset: %d sg_d[%d].length: %u\n", i, 527 " offset: %d sg_d[%d].length: %u\n", i,
550 &sg_s[i], sg_s[i].length, sg_s[i].offset, 528 &sg_s[i], sg_s[i].length, sg_s[i].offset,
551 j, sg_d[j].length); 529 j, sg_d[j].length);
552 printk(KERN_INFO "Step 2 - length: %u src_offset: %u" 530 pr_debug("Step 2 - length: %u src_offset: %u"
553 " dst_offset: %u\n", length, src_offset, 531 " dst_offset: %u\n", length, src_offset,
554 dst_offset); 532 dst_offset);
555#endif 533
556 if (length > req->rd_size) 534 if (length > req->rd_size)
557 length = req->rd_size; 535 length = req->rd_size;
558 536
@@ -576,31 +554,28 @@ static int rd_MEMCPY_write(struct rd_request *req)
576 554
577 memcpy(dst, src, length); 555 memcpy(dst, src, length);
578 556
579#ifdef DEBUG_RAMDISK_MCP 557 pr_debug("page: %u, remaining size: %u, length: %u,"
580 printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
581 " i: %u, j: %u\n", req->rd_page, 558 " i: %u, j: %u\n", req->rd_page,
582 (req->rd_size - length), length, i, j); 559 (req->rd_size - length), length, i, j);
583#endif 560
584 req->rd_size -= length; 561 req->rd_size -= length;
585 if (!(req->rd_size)) 562 if (!req->rd_size)
586 return 0; 563 return 0;
587 564
588 if (!page_end) 565 if (!page_end)
589 continue; 566 continue;
590 567
591 if (++req->rd_page <= table->page_end_offset) { 568 if (++req->rd_page <= table->page_end_offset) {
592#ifdef DEBUG_RAMDISK_MCP 569 pr_debug("page: %u in same page table\n",
593 printk(KERN_INFO "page: %u in same page table\n",
594 req->rd_page); 570 req->rd_page);
595#endif
596 continue; 571 continue;
597 } 572 }
598#ifdef DEBUG_RAMDISK_MCP 573
599 printk(KERN_INFO "getting new page table for page: %u\n", 574 pr_debug("getting new page table for page: %u\n",
600 req->rd_page); 575 req->rd_page);
601#endif 576
602 table = rd_get_sg_table(dev, req->rd_page); 577 table = rd_get_sg_table(dev, req->rd_page);
603 if (!(table)) 578 if (!table)
604 return -EINVAL; 579 return -EINVAL;
605 580
606 sg_d = &table->sg_table[j = 0]; 581 sg_d = &table->sg_table[j = 0];
@@ -641,273 +616,6 @@ static int rd_MEMCPY_do_task(struct se_task *task)
641 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 616 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
642} 617}
643 618
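/*
 * Context for the large removal that follows: the RAMDISK_DR ("DIRECT")
 * backend handed out its internal ramdisk pages as struct se_mem
 * entries (zero-copy), while RAMDISK_MCP copies through
 * task->task_sg. With struct se_mem eliminated earlier in this series
 * and the do_se_mem_map transport callback removed, the DIRECT variant
 * goes away and only the MEMCPY backend remains.
 */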
644/* rd_DIRECT_with_offset():
645 *
646 *
647 */
648static int rd_DIRECT_with_offset(
649 struct se_task *task,
650 struct list_head *se_mem_list,
651 u32 *se_mem_cnt,
652 u32 *task_offset)
653{
654 struct rd_request *req = RD_REQ(task);
655 struct rd_dev *dev = req->rd_dev;
656 struct rd_dev_sg_table *table;
657 struct se_mem *se_mem;
658 struct scatterlist *sg_s;
659 u32 j = 0, set_offset = 1;
660 u32 get_next_table = 0, offset_length, table_sg_end;
661
662 table = rd_get_sg_table(dev, req->rd_page);
663 if (!(table))
664 return -EINVAL;
665
666 table_sg_end = (table->page_end_offset - req->rd_page);
667 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
668#ifdef DEBUG_RAMDISK_DR
669 printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
670 (task->task_data_direction == DMA_TO_DEVICE) ?
671 "Write" : "Read",
672 task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
673#endif
674 while (req->rd_size) {
675 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
676 if (!(se_mem)) {
677 printk(KERN_ERR "Unable to allocate struct se_mem\n");
678 return -ENOMEM;
679 }
680 INIT_LIST_HEAD(&se_mem->se_list);
681
682 if (set_offset) {
683 offset_length = sg_s[j].length - req->rd_offset;
684 if (offset_length > req->rd_size)
685 offset_length = req->rd_size;
686
687 se_mem->se_page = sg_page(&sg_s[j++]);
688 se_mem->se_off = req->rd_offset;
689 se_mem->se_len = offset_length;
690
691 set_offset = 0;
692 get_next_table = (j > table_sg_end);
693 goto check_eot;
694 }
695
696 offset_length = (req->rd_size < req->rd_offset) ?
697 req->rd_size : req->rd_offset;
698
699 se_mem->se_page = sg_page(&sg_s[j]);
700 se_mem->se_len = offset_length;
701
702 set_offset = 1;
703
704check_eot:
705#ifdef DEBUG_RAMDISK_DR
706 printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
707 " se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
708 req->rd_page, req->rd_size, offset_length, j, se_mem,
709 se_mem->se_page, se_mem->se_off, se_mem->se_len);
710#endif
711 list_add_tail(&se_mem->se_list, se_mem_list);
712 (*se_mem_cnt)++;
713
714 req->rd_size -= offset_length;
715 if (!(req->rd_size))
716 goto out;
717
718 if (!set_offset && !get_next_table)
719 continue;
720
721 if (++req->rd_page <= table->page_end_offset) {
722#ifdef DEBUG_RAMDISK_DR
723 printk(KERN_INFO "page: %u in same page table\n",
724 req->rd_page);
725#endif
726 continue;
727 }
728#ifdef DEBUG_RAMDISK_DR
729 printk(KERN_INFO "getting new page table for page: %u\n",
730 req->rd_page);
731#endif
732 table = rd_get_sg_table(dev, req->rd_page);
733 if (!(table))
734 return -EINVAL;
735
736 sg_s = &table->sg_table[j = 0];
737 }
738
739out:
740 task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
741#ifdef DEBUG_RAMDISK_DR
742 printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
743 *se_mem_cnt);
744#endif
745 return 0;
746}
747
748/* rd_DIRECT_without_offset():
749 *
750 *
751 */
752static int rd_DIRECT_without_offset(
753 struct se_task *task,
754 struct list_head *se_mem_list,
755 u32 *se_mem_cnt,
756 u32 *task_offset)
757{
758 struct rd_request *req = RD_REQ(task);
759 struct rd_dev *dev = req->rd_dev;
760 struct rd_dev_sg_table *table;
761 struct se_mem *se_mem;
762 struct scatterlist *sg_s;
763 u32 length, j = 0;
764
765 table = rd_get_sg_table(dev, req->rd_page);
766 if (!(table))
767 return -EINVAL;
768
769 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
770#ifdef DEBUG_RAMDISK_DR
771 printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
772 (task->task_data_direction == DMA_TO_DEVICE) ?
773 "Write" : "Read",
774 task->task_lba, req->rd_size, req->rd_page);
775#endif
776 while (req->rd_size) {
777 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
778 if (!(se_mem)) {
779 printk(KERN_ERR "Unable to allocate struct se_mem\n");
780 return -ENOMEM;
781 }
782 INIT_LIST_HEAD(&se_mem->se_list);
783
784 length = (req->rd_size < sg_s[j].length) ?
785 req->rd_size : sg_s[j].length;
786
787 se_mem->se_page = sg_page(&sg_s[j++]);
788 se_mem->se_len = length;
789
790#ifdef DEBUG_RAMDISK_DR
791 printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
792 " se_page: %p se_off: %u se_len: %u\n", req->rd_page,
793 req->rd_size, j, se_mem, se_mem->se_page,
794 se_mem->se_off, se_mem->se_len);
795#endif
796 list_add_tail(&se_mem->se_list, se_mem_list);
797 (*se_mem_cnt)++;
798
799 req->rd_size -= length;
800 if (!(req->rd_size))
801 goto out;
802
803 if (++req->rd_page <= table->page_end_offset) {
804#ifdef DEBUG_RAMDISK_DR
805 printk("page: %u in same page table\n",
806 req->rd_page);
807#endif
808 continue;
809 }
810#ifdef DEBUG_RAMDISK_DR
811 printk(KERN_INFO "getting new page table for page: %u\n",
812 req->rd_page);
813#endif
814 table = rd_get_sg_table(dev, req->rd_page);
815 if (!(table))
816 return -EINVAL;
817
818 sg_s = &table->sg_table[j = 0];
819 }
820
821out:
822 task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
823#ifdef DEBUG_RAMDISK_DR
824 printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
825 *se_mem_cnt);
826#endif
827 return 0;
828}
829
830/* rd_DIRECT_do_se_mem_map():
831 *
832 *
833 */
834static int rd_DIRECT_do_se_mem_map(
835 struct se_task *task,
836 struct list_head *se_mem_list,
837 void *in_mem,
838 struct se_mem *in_se_mem,
839 struct se_mem **out_se_mem,
840 u32 *se_mem_cnt,
841 u32 *task_offset_in)
842{
843 struct se_cmd *cmd = task->task_se_cmd;
844 struct rd_request *req = RD_REQ(task);
845 u32 task_offset = *task_offset_in;
846 unsigned long long lba;
847 int ret;
848 int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size;
849
850 lba = task->task_lba;
851 req->rd_page = ((task->task_lba * block_size) / PAGE_SIZE);
852 req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size;
853 req->rd_size = task->task_size;
854
855 if (req->rd_offset)
856 ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
857 task_offset_in);
858 else
859 ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
860 task_offset_in);
861
862 if (ret < 0)
863 return ret;
864
865 if (cmd->se_tfo->task_sg_chaining == 0)
866 return 0;
867 /*
868 * Currently prevent writers from multiple HW fabrics doing
869 * pci_map_sg() to RD_DR's internal scatterlist memory.
870 */
871 if (cmd->data_direction == DMA_TO_DEVICE) {
872 printk(KERN_ERR "DMA_TO_DEVICE not supported for"
873 " RAMDISK_DR with task_sg_chaining=1\n");
874 return -ENOSYS;
875 }
876 /*
877 * Special case for if task_sg_chaining is enabled, then
878 * we setup struct se_task->task_sg[], as it will be used by
879 * transport_do_task_sg_chain() for creating chained SGLs
880 * across multiple struct se_task->task_sg[].
881 */
882 ret = transport_init_task_sg(task,
883 list_first_entry(&cmd->t_mem_list,
884 struct se_mem, se_list),
885 task_offset);
886 if (ret <= 0)
887 return ret;
888
889 return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
890 list_first_entry(&cmd->t_mem_list,
891 struct se_mem, se_list),
892 out_se_mem, se_mem_cnt, task_offset_in);
893}
894
895/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
896 *
897 *
898 */
899static int rd_DIRECT_do_task(struct se_task *task)
900{
901 /*
902 * At this point the locally allocated RD tables have been mapped
903 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
904 */
905 task->task_scsi_status = GOOD;
906 transport_complete_task(task, 1);
907
908 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
909}
910
911/* rd_free_task(): (Part of se_subsystem_api_t template) 619/* rd_free_task(): (Part of se_subsystem_api_t template)
912 * 620 *
913 * 621 *
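A note on the address math the removed rd_DIRECT_do_se_mem_map() performed (old lines 850-852): the task LBA scaled by the device block size gives a byte offset, the page index is that offset divided by PAGE_SIZE, and do_div() leaves the block index within the page, which is scaled back to bytes. A minimal standalone sketch of the same computation, assuming block_size evenly divides PAGE_SIZE as the removed code did (the function name is illustrative):

#include <linux/types.h>
#include <asm/div64.h>		/* do_div() */
#include <asm/page.h>		/* PAGE_SIZE */

static void rd_lba_to_page(unsigned long long lba, u32 block_size,
			   u32 *rd_page, u32 *rd_offset)
{
	unsigned long long tmp = lba;

	/* Page index of the first byte addressed by this LBA. */
	*rd_page = (lba * block_size) / PAGE_SIZE;
	/*
	 * do_div() divides tmp in place by blocks-per-page and returns
	 * the remainder, i.e. the block index inside that page; scale
	 * it to a byte offset.
	 */
	*rd_offset = do_div(tmp, PAGE_SIZE / block_size) * block_size;
}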
@@ -952,7 +660,7 @@ static ssize_t rd_set_configfs_dev_params(
952 case Opt_rd_pages: 660 case Opt_rd_pages:
953 match_int(args, &arg); 661 match_int(args, &arg);
954 rd_dev->rd_page_count = arg; 662 rd_dev->rd_page_count = arg;
955 printk(KERN_INFO "RAMDISK: Referencing Page" 663 pr_debug("RAMDISK: Referencing Page"
956 " Count: %u\n", rd_dev->rd_page_count); 664 " Count: %u\n", rd_dev->rd_page_count);
957 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; 665 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
958 break; 666 break;
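The Opt_rd_pages case above relies on the standard linux/parser.h token machinery. A condensed sketch of that pattern with a hypothetical one-entry token table (the driver's real table and error handling are larger):

#include <linux/parser.h>
#include <linux/types.h>

enum { Opt_rd_pages, Opt_err };

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};

/* Parse one "key=value" option already split out of the params string. */
static int parse_one_opt(char *p, u32 *page_count)
{
	substring_t args[MAX_OPT_ARGS];
	int arg;

	switch (match_token(p, tokens, args)) {
	case Opt_rd_pages:
		if (match_int(&args[0], &arg) < 0)
			return -EINVAL;
		*page_count = arg;
		return 0;
	default:
		return -EINVAL;
	}
}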
@@ -970,7 +678,7 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
970 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 678 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
971 679
972 if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { 680 if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
973 printk(KERN_INFO "Missing rd_pages= parameter\n"); 681 pr_debug("Missing rd_pages= parameter\n");
974 return -EINVAL; 682 return -EINVAL;
975 } 683 }
976 684
@@ -1022,27 +730,6 @@ static sector_t rd_get_blocks(struct se_device *dev)
1022 return blocks_long; 730 return blocks_long;
1023} 731}
1024 732
1025static struct se_subsystem_api rd_dr_template = {
1026 .name = "rd_dr",
1027 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
1028 .attach_hba = rd_attach_hba,
1029 .detach_hba = rd_detach_hba,
1030 .allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
1031 .create_virtdevice = rd_DIRECT_create_virtdevice,
1032 .free_device = rd_free_device,
1033 .alloc_task = rd_alloc_task,
1034 .do_task = rd_DIRECT_do_task,
1035 .free_task = rd_free_task,
1036 .check_configfs_dev_params = rd_check_configfs_dev_params,
1037 .set_configfs_dev_params = rd_set_configfs_dev_params,
1038 .show_configfs_dev_params = rd_show_configfs_dev_params,
1039 .get_cdb = rd_get_cdb,
1040 .get_device_rev = rd_get_device_rev,
1041 .get_device_type = rd_get_device_type,
1042 .get_blocks = rd_get_blocks,
1043 .do_se_mem_map = rd_DIRECT_do_se_mem_map,
1044};
1045
1046static struct se_subsystem_api rd_mcp_template = { 733static struct se_subsystem_api rd_mcp_template = {
1047 .name = "rd_mcp", 734 .name = "rd_mcp",
1048 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, 735 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
@@ -1067,13 +754,8 @@ int __init rd_module_init(void)
1067{ 754{
1068 int ret; 755 int ret;
1069 756
1070 ret = transport_subsystem_register(&rd_dr_template);
1071 if (ret < 0)
1072 return ret;
1073
1074 ret = transport_subsystem_register(&rd_mcp_template); 757 ret = transport_subsystem_register(&rd_mcp_template);
1075 if (ret < 0) { 758 if (ret < 0) {
1076 transport_subsystem_release(&rd_dr_template);
1077 return ret; 759 return ret;
1078 } 760 }
1079 761
@@ -1082,6 +764,5 @@ int __init rd_module_init(void)
1082 764
1083void rd_module_exit(void) 765void rd_module_exit(void)
1084{ 766{
1085 transport_subsystem_release(&rd_dr_template);
1086 transport_subsystem_release(&rd_mcp_template); 767 transport_subsystem_release(&rd_mcp_template);
1087} 768}
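With the rd_dr template gone, registration collapses to a single register/release pair. Reconstructed from the new-side columns of the two hunks above, the module entry points now read approximately:

int __init rd_module_init(void)
{
	int ret;

	/* Only the MEMCPY ramdisk backend remains. */
	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}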
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index bab93020a3a9..0d027732cd00 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -32,8 +32,6 @@ struct rd_request {
32 u32 rd_page_count; 32 u32 rd_page_count;
33 /* Scatterlist count */ 33 /* Scatterlist count */
34 u32 rd_size; 34 u32 rd_size;
35 /* Ramdisk device */
36 struct rd_dev *rd_dev;
37} ____cacheline_aligned; 35} ____cacheline_aligned;
38 36
39struct rd_dev_sg_table { 37struct rd_dev_sg_table {
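Dropping the rd_dev back-pointer from struct rd_request means the backend now reaches its device through the task instead of the request. A hedged sketch of that access path, assuming the generic se_device->dev_ptr slot holds the subsystem-private struct rd_dev (the exact expression used post-patch may differ):

static inline struct rd_dev *rd_dev_from_task(struct se_task *task)
{
	/* dev_ptr is the backend-private pointer set at device creation. */
	return task->se_dev->dev_ptr;
}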
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index f1feea3b2319..27d4925e51c3 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -41,13 +41,6 @@
41#include "target_core_alua.h" 41#include "target_core_alua.h"
42#include "target_core_pr.h" 42#include "target_core_pr.h"
43 43
44#define DEBUG_LUN_RESET
45#ifdef DEBUG_LUN_RESET
46#define DEBUG_LR(x...) printk(KERN_INFO x)
47#else
48#define DEBUG_LR(x...)
49#endif
50
51struct se_tmr_req *core_tmr_alloc_req( 44struct se_tmr_req *core_tmr_alloc_req(
52 struct se_cmd *se_cmd, 45 struct se_cmd *se_cmd,
53 void *fabric_tmr_ptr, 46 void *fabric_tmr_ptr,
@@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req(
57 50
58 tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? 51 tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
59 GFP_ATOMIC : GFP_KERNEL); 52 GFP_ATOMIC : GFP_KERNEL);
60 if (!(tmr)) { 53 if (!tmr) {
61 printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); 54 pr_err("Unable to allocate struct se_tmr_req\n");
62 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
63 } 56 }
64 tmr->task_cmd = se_cmd; 57 tmr->task_cmd = se_cmd;
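Note that the allocation above keeps its in_interrupt() test: TMR requests can be allocated from both process and atomic context, so the GFP flags are picked at runtime. The idiom in isolation:

#include <linux/hardirq.h>	/* in_interrupt() */
#include <linux/slab.h>

static void *alloc_any_context(struct kmem_cache *cache)
{
	/*
	 * GFP_KERNEL may sleep during reclaim; sleeping is forbidden in
	 * interrupt context, so fall back to GFP_ATOMIC there.
	 */
	return kmem_cache_zalloc(cache,
				 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}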
@@ -93,14 +86,14 @@ static void core_tmr_handle_tas_abort(
93 int tas, 86 int tas,
94 int fe_count) 87 int fe_count)
95{ 88{
96 if (!(fe_count)) { 89 if (!fe_count) {
97 transport_cmd_finish_abort(cmd, 1); 90 transport_cmd_finish_abort(cmd, 1);
98 return; 91 return;
99 } 92 }
100 /* 93 /*
101 * TASK ABORTED status (TAS) bit support 94 * TASK ABORTED status (TAS) bit support
102 */ 95 */
103 if (((tmr_nacl != NULL) && 96 if ((tmr_nacl &&
104 (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) 97 (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
105 transport_send_task_abort(cmd); 98 transport_send_task_abort(cmd);
106 99
@@ -141,13 +134,13 @@ int core_tmr_lun_reset(
141 tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; 134 tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
142 tmr_tpg = tmr->task_cmd->se_sess->se_tpg; 135 tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
143 if (tmr_nacl && tmr_tpg) { 136 if (tmr_nacl && tmr_tpg) {
144 DEBUG_LR("LUN_RESET: TMR caller fabric: %s" 137 pr_debug("LUN_RESET: TMR caller fabric: %s"
145 " initiator port %s\n", 138 " initiator port %s\n",
146 tmr_tpg->se_tpg_tfo->get_fabric_name(), 139 tmr_tpg->se_tpg_tfo->get_fabric_name(),
147 tmr_nacl->initiatorname); 140 tmr_nacl->initiatorname);
148 } 141 }
149 } 142 }
150 DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", 143 pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
151 (preempt_and_abort_list) ? "Preempt" : "TMR", 144 (preempt_and_abort_list) ? "Preempt" : "TMR",
152 dev->transport->name, tas); 145 dev->transport->name, tas);
153 /* 146 /*
@@ -163,8 +156,8 @@ int core_tmr_lun_reset(
163 continue; 156 continue;
164 157
165 cmd = tmr_p->task_cmd; 158 cmd = tmr_p->task_cmd;
166 if (!(cmd)) { 159 if (!cmd) {
167 printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n"); 160 pr_err("Unable to locate struct se_cmd for TMR\n");
168 continue; 161 continue;
169 } 162 }
170 /* 163 /*
@@ -172,14 +165,14 @@ int core_tmr_lun_reset(
172 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action 165 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
173 * skip non registration key matching TMRs. 166
174 */ 167 */
175 if ((preempt_and_abort_list != NULL) && 168 if (preempt_and_abort_list &&
176 (core_scsi3_check_cdb_abort_and_preempt( 169 (core_scsi3_check_cdb_abort_and_preempt(
177 preempt_and_abort_list, cmd) != 0)) 170 preempt_and_abort_list, cmd) != 0))
178 continue; 171 continue;
179 spin_unlock_irq(&dev->se_tmr_lock); 172 spin_unlock_irq(&dev->se_tmr_lock);
180 173
181 spin_lock_irqsave(&cmd->t_state_lock, flags); 174 spin_lock_irqsave(&cmd->t_state_lock, flags);
182 if (!(atomic_read(&cmd->t_transport_active))) { 175 if (!atomic_read(&cmd->t_transport_active)) {
183 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 176 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
184 spin_lock_irq(&dev->se_tmr_lock); 177 spin_lock_irq(&dev->se_tmr_lock);
185 continue; 178 continue;
@@ -189,7 +182,7 @@ int core_tmr_lun_reset(
189 spin_lock_irq(&dev->se_tmr_lock); 182 spin_lock_irq(&dev->se_tmr_lock);
190 continue; 183 continue;
191 } 184 }
192 DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x," 185 pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
193 " Response: 0x%02x, t_state: %d\n", 186 " Response: 0x%02x, t_state: %d\n",
194 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 187 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
195 tmr_p->function, tmr_p->response, cmd->t_state); 188 tmr_p->function, tmr_p->response, cmd->t_state);
@@ -224,7 +217,7 @@ int core_tmr_lun_reset(
224 list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, 217 list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
225 t_state_list) { 218 t_state_list) {
226 if (!task->task_se_cmd) { 219 if (!task->task_se_cmd) {
227 printk(KERN_ERR "task->task_se_cmd is NULL!\n"); 220 pr_err("task->task_se_cmd is NULL!\n");
228 continue; 221 continue;
229 } 222 }
230 cmd = task->task_se_cmd; 223 cmd = task->task_se_cmd;
@@ -233,7 +226,7 @@ int core_tmr_lun_reset(
233 * For PREEMPT_AND_ABORT usage, only process commands 226 * For PREEMPT_AND_ABORT usage, only process commands
234 * with a matching reservation key. 227 * with a matching reservation key.
235 */ 228 */
236 if ((preempt_and_abort_list != NULL) && 229 if (preempt_and_abort_list &&
237 (core_scsi3_check_cdb_abort_and_preempt( 230 (core_scsi3_check_cdb_abort_and_preempt(
238 preempt_and_abort_list, cmd) != 0)) 231 preempt_and_abort_list, cmd) != 0))
239 continue; 232 continue;
@@ -248,14 +241,14 @@ int core_tmr_lun_reset(
248 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 241 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
249 242
250 spin_lock_irqsave(&cmd->t_state_lock, flags); 243 spin_lock_irqsave(&cmd->t_state_lock, flags);
251 DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" 244 pr_debug("LUN_RESET: %s cmd: %p task: %p"
252 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" 245 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
253 "def_t_state: %d/%d cdb: 0x%02x\n", 246 "def_t_state: %d/%d cdb: 0x%02x\n",
254 (preempt_and_abort_list) ? "Preempt" : "", cmd, task, 247 (preempt_and_abort_list) ? "Preempt" : "", cmd, task,
255 cmd->se_tfo->get_task_tag(cmd), 0, 248 cmd->se_tfo->get_task_tag(cmd), 0,
256 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, 249 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
257 cmd->deferred_t_state, cmd->t_task_cdb[0]); 250 cmd->deferred_t_state, cmd->t_task_cdb[0]);
258 DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" 251 pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
259 " t_task_cdbs: %d t_task_cdbs_left: %d" 252 " t_task_cdbs: %d t_task_cdbs_left: %d"
260 " t_task_cdbs_sent: %d -- t_transport_active: %d" 253 " t_task_cdbs_sent: %d -- t_transport_active: %d"
261 " t_transport_stop: %d t_transport_sent: %d\n", 254 " t_transport_stop: %d t_transport_sent: %d\n",
@@ -272,10 +265,10 @@ int core_tmr_lun_reset(
272 spin_unlock_irqrestore( 265 spin_unlock_irqrestore(
273 &cmd->t_state_lock, flags); 266 &cmd->t_state_lock, flags);
274 267
275 DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" 268 pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
276 " for dev: %p\n", task, dev); 269 " for dev: %p\n", task, dev);
277 wait_for_completion(&task->task_stop_comp); 270 wait_for_completion(&task->task_stop_comp);
278 DEBUG_LR("LUN_RESET Completed task: %p shutdown for" 271 pr_debug("LUN_RESET Completed task: %p shutdown for"
279 " dev: %p\n", task, dev); 272 " dev: %p\n", task, dev);
280 spin_lock_irqsave(&cmd->t_state_lock, flags); 273 spin_lock_irqsave(&cmd->t_state_lock, flags);
281 atomic_dec(&cmd->t_task_cdbs_left); 274 atomic_dec(&cmd->t_task_cdbs_left);
@@ -288,10 +281,10 @@ int core_tmr_lun_reset(
288 } 281 }
289 __transport_stop_task_timer(task, &flags); 282 __transport_stop_task_timer(task, &flags);
290 283
291 if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { 284 if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
292 spin_unlock_irqrestore( 285 spin_unlock_irqrestore(
293 &cmd->t_state_lock, flags); 286 &cmd->t_state_lock, flags);
294 DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" 287 pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
295 " t_task_cdbs_ex_left: %d\n", task, dev, 288 " t_task_cdbs_ex_left: %d\n", task, dev,
296 atomic_read(&cmd->t_task_cdbs_ex_left)); 289 atomic_read(&cmd->t_task_cdbs_ex_left));
297 290
@@ -301,7 +294,7 @@ int core_tmr_lun_reset(
301 fe_count = atomic_read(&cmd->t_fe_count); 294 fe_count = atomic_read(&cmd->t_fe_count);
302 295
303 if (atomic_read(&cmd->t_transport_active)) { 296 if (atomic_read(&cmd->t_transport_active)) {
304 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" 297 pr_debug("LUN_RESET: got t_transport_active = 1 for"
305 " task: %p, t_fe_count: %d dev: %p\n", task, 298 " task: %p, t_fe_count: %d dev: %p\n", task,
306 fe_count, dev); 299 fe_count, dev);
307 atomic_set(&cmd->t_transport_aborted, 1); 300 atomic_set(&cmd->t_transport_aborted, 1);
@@ -312,7 +305,7 @@ int core_tmr_lun_reset(
312 spin_lock_irqsave(&dev->execute_task_lock, flags); 305 spin_lock_irqsave(&dev->execute_task_lock, flags);
313 continue; 306 continue;
314 } 307 }
315 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," 308 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
316 " t_fe_count: %d dev: %p\n", task, fe_count, dev); 309 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
317 atomic_set(&cmd->t_transport_aborted, 1); 310 atomic_set(&cmd->t_transport_aborted, 1);
318 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 311 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -335,7 +328,7 @@ int core_tmr_lun_reset(
335 * For PREEMPT_AND_ABORT usage, only process commands 328 * For PREEMPT_AND_ABORT usage, only process commands
336 * with a matching reservation key. 329 * with a matching reservation key.
337 */ 330 */
338 if ((preempt_and_abort_list != NULL) && 331 if (preempt_and_abort_list &&
339 (core_scsi3_check_cdb_abort_and_preempt( 332 (core_scsi3_check_cdb_abort_and_preempt(
340 preempt_and_abort_list, cmd) != 0)) 333 preempt_and_abort_list, cmd) != 0))
341 continue; 334 continue;
@@ -350,7 +343,7 @@ int core_tmr_lun_reset(
350 list_del(&cmd->se_queue_node); 343 list_del(&cmd->se_queue_node);
351 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 344 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
352 345
353 DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" 346 pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
354 " %d t_fe_count: %d\n", (preempt_and_abort_list) ? 347 " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
355 "Preempt" : "", cmd, cmd->t_state, 348 "Preempt" : "", cmd, cmd->t_state,
356 atomic_read(&cmd->t_fe_count)); 349 atomic_read(&cmd->t_fe_count));
@@ -368,20 +361,20 @@ int core_tmr_lun_reset(
368 * Clear any legacy SPC-2 reservation when called during 361 * Clear any legacy SPC-2 reservation when called during
369 * LOGICAL UNIT RESET 362 * LOGICAL UNIT RESET
370 */ 363 */
371 if (!(preempt_and_abort_list) && 364 if (!preempt_and_abort_list &&
372 (dev->dev_flags & DF_SPC2_RESERVATIONS)) { 365 (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
373 spin_lock(&dev->dev_reservation_lock); 366 spin_lock(&dev->dev_reservation_lock);
374 dev->dev_reserved_node_acl = NULL; 367 dev->dev_reserved_node_acl = NULL;
375 dev->dev_flags &= ~DF_SPC2_RESERVATIONS; 368 dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
376 spin_unlock(&dev->dev_reservation_lock); 369 spin_unlock(&dev->dev_reservation_lock);
377 printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); 370 pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
378 } 371 }
379 372
380 spin_lock_irq(&dev->stats_lock); 373 spin_lock_irq(&dev->stats_lock);
381 dev->num_resets++; 374 dev->num_resets++;
382 spin_unlock_irq(&dev->stats_lock); 375 spin_unlock_irq(&dev->stats_lock);
383 376
384 DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", 377 pr_debug("LUN_RESET: %s for [%s] Complete\n",
385 (preempt_and_abort_list) ? "Preempt" : "TMR", 378 (preempt_and_abort_list) ? "Preempt" : "TMR",
386 dev->transport->name); 379 dev->transport->name);
387 return 0; 380 return 0;
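For orientation, the core_tmr_handle_tas_abort() conversion near the top of this file implements a three-way decision. A sketch of the whole helper as it reads after the patch; the trailing transport_cmd_finish_abort(cmd, 0) falls outside the hunk shown and is reconstructed from context:

static void core_tmr_handle_tas_abort(
	struct se_node_acl *tmr_nacl,
	struct se_cmd *cmd,
	int tas,
	int fe_count)
{
	/* No frontend references remain: finish the abort directly. */
	if (!fe_count) {
		transport_cmd_finish_abort(cmd, 1);
		return;
	}
	/*
	 * TASK ABORTED status (TAS) bit support: notify when the TMR
	 * initiator owns this command, or when the device's TAS bit
	 * requests notification of other initiators.
	 */
	if ((tmr_nacl && tmr_nacl == cmd->se_sess->se_node_acl) || tas)
		transport_send_task_abort(cmd);

	transport_cmd_finish_abort(cmd, 0);
}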
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 448129f74cf9..4f1ba4c5ef11 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -72,7 +72,7 @@ static void core_clear_initiator_node_from_tpg(
72 continue; 72 continue;
73 73
74 if (!deve->se_lun) { 74 if (!deve->se_lun) {
75 printk(KERN_ERR "%s device entries device pointer is" 75 pr_err("%s device entries device pointer is"
76 " NULL, but Initiator has access.\n", 76 " NULL, but Initiator has access.\n",
77 tpg->se_tpg_tfo->get_fabric_name()); 77 tpg->se_tpg_tfo->get_fabric_name());
78 continue; 78 continue;
@@ -86,14 +86,13 @@ static void core_clear_initiator_node_from_tpg(
86 spin_lock(&lun->lun_acl_lock); 86 spin_lock(&lun->lun_acl_lock);
87 list_for_each_entry_safe(acl, acl_tmp, 87 list_for_each_entry_safe(acl, acl_tmp,
88 &lun->lun_acl_list, lacl_list) { 88 &lun->lun_acl_list, lacl_list) {
89 if (!(strcmp(acl->initiatorname, 89 if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
90 nacl->initiatorname)) && 90 (acl->mapped_lun == deve->mapped_lun))
91 (acl->mapped_lun == deve->mapped_lun))
92 break; 91 break;
93 } 92 }
94 93
95 if (!acl) { 94 if (!acl) {
96 printk(KERN_ERR "Unable to locate struct se_lun_acl for %s," 95 pr_err("Unable to locate struct se_lun_acl for %s,"
97 " mapped_lun: %u\n", nacl->initiatorname, 96 " mapped_lun: %u\n", nacl->initiatorname,
98 deve->mapped_lun); 97 deve->mapped_lun);
99 spin_unlock(&lun->lun_acl_lock); 98 spin_unlock(&lun->lun_acl_lock);
@@ -121,7 +120,7 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(
121 struct se_node_acl *acl; 120 struct se_node_acl *acl;
122 121
123 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 122 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
124 if (!(strcmp(acl->initiatorname, initiatorname))) 123 if (!strcmp(acl->initiatorname, initiatorname))
125 return acl; 124 return acl;
126 } 125 }
127 126
@@ -140,8 +139,8 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
140 139
141 spin_lock_bh(&tpg->acl_node_lock); 140 spin_lock_bh(&tpg->acl_node_lock);
142 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
143 if (!(strcmp(acl->initiatorname, initiatorname)) && 142 if (!strcmp(acl->initiatorname, initiatorname) &&
144 (!(acl->dynamic_node_acl))) { 143 !acl->dynamic_node_acl) {
145 spin_unlock_bh(&tpg->acl_node_lock); 144 spin_unlock_bh(&tpg->acl_node_lock);
146 return acl; 145 return acl;
147 } 146 }
@@ -177,7 +176,7 @@ void core_tpg_add_node_to_devs(
177 * By default in LIO-Target $FABRIC_MOD, 176 * By default in LIO-Target $FABRIC_MOD,
178 * demo_mode_write_protect is ON, or READ_ONLY; 177 * demo_mode_write_protect is ON, or READ_ONLY;
179 */ 178 */
180 if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg))) { 179 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
181 if (dev->dev_flags & DF_READ_ONLY) 180 if (dev->dev_flags & DF_READ_ONLY)
182 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 181 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
183 else 182 else
@@ -193,7 +192,7 @@ void core_tpg_add_node_to_devs(
193 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 192 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
194 } 193 }
195 194
196 printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" 195 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
197 " access for LUN in Demo Mode\n", 196 " access for LUN in Demo Mode\n",
198 tpg->se_tpg_tfo->get_fabric_name(), 197 tpg->se_tpg_tfo->get_fabric_name(),
199 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 198 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@@ -216,7 +215,7 @@ static int core_set_queue_depth_for_node(
216 struct se_node_acl *acl) 215 struct se_node_acl *acl)
217{ 216{
218 if (!acl->queue_depth) { 217 if (!acl->queue_depth) {
219 printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," 218 pr_err("Queue depth for %s Initiator Node: %s is 0,"
220 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), 219 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
221 acl->initiatorname); 220 acl->initiatorname);
222 acl->queue_depth = 1; 221 acl->queue_depth = 1;
@@ -236,8 +235,8 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
236 235
237 nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * 236 nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
238 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); 237 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
239 if (!(nacl->device_list)) { 238 if (!nacl->device_list) {
240 printk(KERN_ERR "Unable to allocate memory for" 239 pr_err("Unable to allocate memory for"
241 " struct se_node_acl->device_list\n"); 240 " struct se_node_acl->device_list\n");
242 return -ENOMEM; 241 return -ENOMEM;
243 } 242 }
@@ -265,14 +264,14 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
265 struct se_node_acl *acl; 264 struct se_node_acl *acl;
266 265
267 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 266 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
268 if ((acl)) 267 if (acl)
269 return acl; 268 return acl;
270 269
271 if (!(tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))) 270 if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
272 return NULL; 271 return NULL;
273 272
274 acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); 273 acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
275 if (!(acl)) 274 if (!acl)
276 return NULL; 275 return NULL;
277 276
278 INIT_LIST_HEAD(&acl->acl_list); 277 INIT_LIST_HEAD(&acl->acl_list);
@@ -307,7 +306,7 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
307 tpg->num_node_acls++; 306 tpg->num_node_acls++;
308 spin_unlock_bh(&tpg->acl_node_lock); 307 spin_unlock_bh(&tpg->acl_node_lock);
309 308
310 printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 309 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
311 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 310 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
312 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 311 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
313 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 312 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
@@ -357,10 +356,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
357 356
358 spin_lock_bh(&tpg->acl_node_lock); 357 spin_lock_bh(&tpg->acl_node_lock);
359 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 358 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
360 if ((acl)) { 359 if (acl) {
361 if (acl->dynamic_node_acl) { 360 if (acl->dynamic_node_acl) {
362 acl->dynamic_node_acl = 0; 361 acl->dynamic_node_acl = 0;
363 printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" 362 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
364 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 363 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
365 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 364 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
366 spin_unlock_bh(&tpg->acl_node_lock); 365 spin_unlock_bh(&tpg->acl_node_lock);
@@ -375,7 +374,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
375 goto done; 374 goto done;
376 } 375 }
377 376
378 printk(KERN_ERR "ACL entry for %s Initiator" 377 pr_err("ACL entry for %s Initiator"
379 " Node %s already exists for TPG %u, ignoring" 378 " Node %s already exists for TPG %u, ignoring"
380 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 379 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
381 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 380 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -384,8 +383,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
384 } 383 }
385 spin_unlock_bh(&tpg->acl_node_lock); 384 spin_unlock_bh(&tpg->acl_node_lock);
386 385
387 if (!(se_nacl)) { 386 if (!se_nacl) {
388 printk("struct se_node_acl pointer is NULL\n"); 387 pr_err("struct se_node_acl pointer is NULL\n");
389 return ERR_PTR(-EINVAL); 388 return ERR_PTR(-EINVAL);
390 } 389 }
391 /* 390 /*
@@ -425,7 +424,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
425 spin_unlock_bh(&tpg->acl_node_lock); 424 spin_unlock_bh(&tpg->acl_node_lock);
426 425
427done: 426done:
428 printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 427 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
429 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 428 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
430 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 429 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
431 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 430 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
@@ -463,7 +462,7 @@ int core_tpg_del_initiator_node_acl(
463 /* 462 /*
464 * Determine if the session needs to be closed by our context. 463 * Determine if the session needs to be closed by our context.
465 */ 464 */
466 if (!(tpg->se_tpg_tfo->shutdown_session(sess))) 465 if (!tpg->se_tpg_tfo->shutdown_session(sess))
467 continue; 466 continue;
468 467
469 spin_unlock_bh(&tpg->session_lock); 468 spin_unlock_bh(&tpg->session_lock);
@@ -481,7 +480,7 @@ int core_tpg_del_initiator_node_acl(
481 core_clear_initiator_node_from_tpg(acl, tpg); 480 core_clear_initiator_node_from_tpg(acl, tpg);
482 core_free_device_list_for_node(acl, tpg); 481 core_free_device_list_for_node(acl, tpg);
483 482
484 printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" 483 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
485 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 484 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
486 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 485 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
487 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); 486 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
@@ -506,8 +505,8 @@ int core_tpg_set_initiator_node_queue_depth(
506 505
507 spin_lock_bh(&tpg->acl_node_lock); 506 spin_lock_bh(&tpg->acl_node_lock);
508 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 507 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
509 if (!(acl)) { 508 if (!acl) {
510 printk(KERN_ERR "Access Control List entry for %s Initiator" 509 pr_err("Access Control List entry for %s Initiator"
511 " Node %s does not exists for TPG %hu, ignoring" 510 " Node %s does not exists for TPG %hu, ignoring"
512 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 511 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
513 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 512 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -526,7 +525,7 @@ int core_tpg_set_initiator_node_queue_depth(
526 continue; 525 continue;
527 526
528 if (!force) { 527 if (!force) {
529 printk(KERN_ERR "Unable to change queue depth for %s" 528 pr_err("Unable to change queue depth for %s"
530 " Initiator Node: %s while session is" 529 " Initiator Node: %s while session is"
531 " operational. To forcefully change the queue" 530 " operational. To forcefully change the queue"
532 " depth and force session reinstatement" 531 " depth and force session reinstatement"
@@ -543,7 +542,7 @@ int core_tpg_set_initiator_node_queue_depth(
543 /* 542 /*
544 * Determine if the session needs to be closed by our context. 543 * Determine if the session needs to be closed by our context.
545 */ 544 */
546 if (!(tpg->se_tpg_tfo->shutdown_session(sess))) 545 if (!tpg->se_tpg_tfo->shutdown_session(sess))
547 continue; 546 continue;
548 547
549 init_sess = sess; 548 init_sess = sess;
@@ -586,7 +585,7 @@ int core_tpg_set_initiator_node_queue_depth(
586 if (init_sess) 585 if (init_sess)
587 tpg->se_tpg_tfo->close_session(init_sess); 586 tpg->se_tpg_tfo->close_session(init_sess);
588 587
589 printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator" 588 pr_debug("Successfuly changed queue depth to: %d for Initiator"
590 " Node: %s on %s Target Portal Group: %u\n", queue_depth, 589 " Node: %s on %s Target Portal Group: %u\n", queue_depth,
591 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 590 initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
592 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 591 tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -644,8 +643,8 @@ int core_tpg_register(
644 643
645 se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * 644 se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
646 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); 645 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
647 if (!(se_tpg->tpg_lun_list)) { 646 if (!se_tpg->tpg_lun_list) {
648 printk(KERN_ERR "Unable to allocate struct se_portal_group->" 647 pr_err("Unable to allocate struct se_portal_group->"
649 "tpg_lun_list\n"); 648 "tpg_lun_list\n");
650 return -ENOMEM; 649 return -ENOMEM;
651 } 650 }
@@ -686,7 +685,7 @@ int core_tpg_register(
686 list_add_tail(&se_tpg->se_tpg_node, &tpg_list); 685 list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
687 spin_unlock_bh(&tpg_lock); 686 spin_unlock_bh(&tpg_lock);
688 687
689 printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" 688 pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
690 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), 689 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
691 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 690 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
692 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? 691 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
@@ -700,7 +699,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
700{ 699{
701 struct se_node_acl *nacl, *nacl_tmp; 700 struct se_node_acl *nacl, *nacl_tmp;
702 701
703 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" 702 pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
704 " for endpoint: %s Portal Tag %u\n", 703 " for endpoint: %s Portal Tag %u\n",
705 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 704 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
706 "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), 705 "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
@@ -749,7 +748,7 @@ struct se_lun *core_tpg_pre_addlun(
749 struct se_lun *lun; 748 struct se_lun *lun;
750 749
751 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 750 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
752 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 751 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
753 "-1: %u for Target Portal Group: %u\n", 752 "-1: %u for Target Portal Group: %u\n",
754 tpg->se_tpg_tfo->get_fabric_name(), 753 tpg->se_tpg_tfo->get_fabric_name(),
755 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, 754 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -760,7 +759,7 @@ struct se_lun *core_tpg_pre_addlun(
760 spin_lock(&tpg->tpg_lun_lock); 759 spin_lock(&tpg->tpg_lun_lock);
761 lun = &tpg->tpg_lun_list[unpacked_lun]; 760 lun = &tpg->tpg_lun_list[unpacked_lun];
762 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { 761 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
763 printk(KERN_ERR "TPG Logical Unit Number: %u is already active" 762 pr_err("TPG Logical Unit Number: %u is already active"
764 " on %s Target Portal Group: %u, ignoring request.\n", 763 " on %s Target Portal Group: %u, ignoring request.\n",
765 unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), 764 unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
766 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 765 tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -808,7 +807,7 @@ struct se_lun *core_tpg_pre_dellun(
808 struct se_lun *lun; 807 struct se_lun *lun;
809 808
810 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 809 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
811 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 810 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
812 "-1: %u for Target Portal Group: %u\n", 811 "-1: %u for Target Portal Group: %u\n",
813 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 812 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
814 TRANSPORT_MAX_LUNS_PER_TPG-1, 813 TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -819,7 +818,7 @@ struct se_lun *core_tpg_pre_dellun(
819 spin_lock(&tpg->tpg_lun_lock); 818 spin_lock(&tpg->tpg_lun_lock);
820 lun = &tpg->tpg_lun_list[unpacked_lun]; 819 lun = &tpg->tpg_lun_list[unpacked_lun];
821 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 820 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
822 printk(KERN_ERR "%s Logical Unit Number: %u is not active on" 821 pr_err("%s Logical Unit Number: %u is not active on"
823 " Target Portal Group: %u, ignoring request.\n", 822 " Target Portal Group: %u, ignoring request.\n",
824 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 823 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
825 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 824 tpg->se_tpg_tfo->tpg_get_tag(tpg));
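Most of this file's churn is the mechanical !(expr) to !expr cleanup. The initiator-node lookup shows the payoff in readability; condensed from __core_tpg_get_initiator_node_acl() as it reads post-patch (the function name here is illustrative):

static struct se_node_acl *find_node_acl(struct se_portal_group *tpg,
					 const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		/* strcmp() returns 0 on a match, hence the bare !strcmp(). */
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}
	return NULL;
}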
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c743d94baf77..55b6588904a4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -58,132 +58,6 @@
58#include "target_core_scdb.h" 58#include "target_core_scdb.h"
59#include "target_core_ua.h" 59#include "target_core_ua.h"
60 60
61/* #define DEBUG_CDB_HANDLER */
62#ifdef DEBUG_CDB_HANDLER
63#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
64#else
65#define DEBUG_CDB_H(x...)
66#endif
67
68/* #define DEBUG_CMD_MAP */
69#ifdef DEBUG_CMD_MAP
70#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
71#else
72#define DEBUG_CMD_M(x...)
73#endif
74
75/* #define DEBUG_MEM_ALLOC */
76#ifdef DEBUG_MEM_ALLOC
77#define DEBUG_MEM(x...) printk(KERN_INFO x)
78#else
79#define DEBUG_MEM(x...)
80#endif
81
82/* #define DEBUG_MEM2_ALLOC */
83#ifdef DEBUG_MEM2_ALLOC
84#define DEBUG_MEM2(x...) printk(KERN_INFO x)
85#else
86#define DEBUG_MEM2(x...)
87#endif
88
89/* #define DEBUG_SG_CALC */
90#ifdef DEBUG_SG_CALC
91#define DEBUG_SC(x...) printk(KERN_INFO x)
92#else
93#define DEBUG_SC(x...)
94#endif
95
96/* #define DEBUG_SE_OBJ */
97#ifdef DEBUG_SE_OBJ
98#define DEBUG_SO(x...) printk(KERN_INFO x)
99#else
100#define DEBUG_SO(x...)
101#endif
102
103/* #define DEBUG_CMD_VOL */
104#ifdef DEBUG_CMD_VOL
105#define DEBUG_VOL(x...) printk(KERN_INFO x)
106#else
107#define DEBUG_VOL(x...)
108#endif
109
110/* #define DEBUG_CMD_STOP */
111#ifdef DEBUG_CMD_STOP
112#define DEBUG_CS(x...) printk(KERN_INFO x)
113#else
114#define DEBUG_CS(x...)
115#endif
116
117/* #define DEBUG_PASSTHROUGH */
118#ifdef DEBUG_PASSTHROUGH
119#define DEBUG_PT(x...) printk(KERN_INFO x)
120#else
121#define DEBUG_PT(x...)
122#endif
123
124/* #define DEBUG_TASK_STOP */
125#ifdef DEBUG_TASK_STOP
126#define DEBUG_TS(x...) printk(KERN_INFO x)
127#else
128#define DEBUG_TS(x...)
129#endif
130
131/* #define DEBUG_TRANSPORT_STOP */
132#ifdef DEBUG_TRANSPORT_STOP
133#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
134#else
135#define DEBUG_TRANSPORT_S(x...)
136#endif
137
138/* #define DEBUG_TASK_FAILURE */
139#ifdef DEBUG_TASK_FAILURE
140#define DEBUG_TF(x...) printk(KERN_INFO x)
141#else
142#define DEBUG_TF(x...)
143#endif
144
145/* #define DEBUG_DEV_OFFLINE */
146#ifdef DEBUG_DEV_OFFLINE
147#define DEBUG_DO(x...) printk(KERN_INFO x)
148#else
149#define DEBUG_DO(x...)
150#endif
151
152/* #define DEBUG_TASK_STATE */
153#ifdef DEBUG_TASK_STATE
154#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
155#else
156#define DEBUG_TSTATE(x...)
157#endif
158
159/* #define DEBUG_STATUS_THR */
160#ifdef DEBUG_STATUS_THR
161#define DEBUG_ST(x...) printk(KERN_INFO x)
162#else
163#define DEBUG_ST(x...)
164#endif
165
166/* #define DEBUG_TASK_TIMEOUT */
167#ifdef DEBUG_TASK_TIMEOUT
168#define DEBUG_TT(x...) printk(KERN_INFO x)
169#else
170#define DEBUG_TT(x...)
171#endif
172
173/* #define DEBUG_GENERIC_REQUEST_FAILURE */
174#ifdef DEBUG_GENERIC_REQUEST_FAILURE
175#define DEBUG_GRF(x...) printk(KERN_INFO x)
176#else
177#define DEBUG_GRF(x...)
178#endif
179
180/* #define DEBUG_SAM_TASK_ATTRS */
181#ifdef DEBUG_SAM_TASK_ATTRS
182#define DEBUG_STA(x...) printk(KERN_INFO x)
183#else
184#define DEBUG_STA(x...)
185#endif
186
187static int sub_api_initialized; 61static int sub_api_initialized;
188 62
189static struct kmem_cache *se_cmd_cache; 63static struct kmem_cache *se_cmd_cache;
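All of the per-file DEBUG_* macro families deleted above collapse into plain pr_debug(), which CONFIG_DYNAMIC_DEBUG can then enable per call site at runtime instead of per rebuild. The usual companion is a pr_fmt() prefix defined before any include so every message carries a uniform tag; a sketch of the convention (the prefix string is illustrative, not necessarily what target_core defines):

#define pr_fmt(fmt) "target: " fmt	/* must precede the includes */

#include <linux/kernel.h>

static void example(struct se_cmd *cmd)
{
	/*
	 * Compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is set;
	 * with dynamic debug, enabled per file/line/function through
	 * /sys/kernel/debug/dynamic_debug/control.
	 */
	pr_debug("cmd: %p t_state: %d\n", cmd, cmd->t_state);

	/* Errors stay unconditional at KERN_ERR. */
	pr_err("cmd: %p failed\n", cmd);
}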
@@ -225,62 +99,62 @@ int init_se_kmem_caches(void)
225{ 99{
226 se_cmd_cache = kmem_cache_create("se_cmd_cache", 100 se_cmd_cache = kmem_cache_create("se_cmd_cache",
227 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); 101 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
228 if (!(se_cmd_cache)) { 102 if (!se_cmd_cache) {
229 printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); 103 pr_err("kmem_cache_create for struct se_cmd failed\n");
230 goto out; 104 goto out;
231 } 105 }
232 se_tmr_req_cache = kmem_cache_create("se_tmr_cache", 106 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
233 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 107 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
234 0, NULL); 108 0, NULL);
235 if (!(se_tmr_req_cache)) { 109 if (!se_tmr_req_cache) {
236 printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" 110 pr_err("kmem_cache_create() for struct se_tmr_req"
237 " failed\n"); 111 " failed\n");
238 goto out; 112 goto out;
239 } 113 }
240 se_sess_cache = kmem_cache_create("se_sess_cache", 114 se_sess_cache = kmem_cache_create("se_sess_cache",
241 sizeof(struct se_session), __alignof__(struct se_session), 115 sizeof(struct se_session), __alignof__(struct se_session),
242 0, NULL); 116 0, NULL);
243 if (!(se_sess_cache)) { 117 if (!se_sess_cache) {
244 printk(KERN_ERR "kmem_cache_create() for struct se_session" 118 pr_err("kmem_cache_create() for struct se_session"
245 " failed\n"); 119 " failed\n");
246 goto out; 120 goto out;
247 } 121 }
248 se_ua_cache = kmem_cache_create("se_ua_cache", 122 se_ua_cache = kmem_cache_create("se_ua_cache",
249 sizeof(struct se_ua), __alignof__(struct se_ua), 123 sizeof(struct se_ua), __alignof__(struct se_ua),
250 0, NULL); 124 0, NULL);
251 if (!(se_ua_cache)) { 125 if (!se_ua_cache) {
252 printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); 126 pr_err("kmem_cache_create() for struct se_ua failed\n");
253 goto out; 127 goto out;
254 } 128 }
255 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 129 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
256 sizeof(struct t10_pr_registration), 130 sizeof(struct t10_pr_registration),
257 __alignof__(struct t10_pr_registration), 0, NULL); 131 __alignof__(struct t10_pr_registration), 0, NULL);
258 if (!(t10_pr_reg_cache)) { 132 if (!t10_pr_reg_cache) {
259 printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" 133 pr_err("kmem_cache_create() for struct t10_pr_registration"
260 " failed\n"); 134 " failed\n");
261 goto out; 135 goto out;
262 } 136 }
263 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 137 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
264 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 138 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
265 0, NULL); 139 0, NULL);
266 if (!(t10_alua_lu_gp_cache)) { 140 if (!t10_alua_lu_gp_cache) {
267 printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" 141 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
268 " failed\n"); 142 " failed\n");
269 goto out; 143 goto out;
270 } 144 }
271 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 145 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
272 sizeof(struct t10_alua_lu_gp_member), 146 sizeof(struct t10_alua_lu_gp_member),
273 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 147 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
274 if (!(t10_alua_lu_gp_mem_cache)) { 148 if (!t10_alua_lu_gp_mem_cache) {
275 printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" 149 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
276 "cache failed\n"); 150 "cache failed\n");
277 goto out; 151 goto out;
278 } 152 }
279 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 153 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
280 sizeof(struct t10_alua_tg_pt_gp), 154 sizeof(struct t10_alua_tg_pt_gp),
281 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 155 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
282 if (!(t10_alua_tg_pt_gp_cache)) { 156 if (!t10_alua_tg_pt_gp_cache) {
283 printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" 157 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
284 "cache failed\n"); 158 "cache failed\n");
285 goto out; 159 goto out;
286 } 160 }
@@ -289,8 +163,8 @@ int init_se_kmem_caches(void)
289 sizeof(struct t10_alua_tg_pt_gp_member), 163 sizeof(struct t10_alua_tg_pt_gp_member),
290 __alignof__(struct t10_alua_tg_pt_gp_member), 164 __alignof__(struct t10_alua_tg_pt_gp_member),
291 0, NULL); 165 0, NULL);
292 if (!(t10_alua_tg_pt_gp_mem_cache)) { 166 if (!t10_alua_tg_pt_gp_mem_cache) {
293 printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" 167 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
294 "mem_t failed\n"); 168 "mem_t failed\n");
295 goto out; 169 goto out;
296 } 170 }
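Every cache in init_se_kmem_caches() follows the same create / check / goto-out shape with a single error exit that tears down whatever was already created. The pattern reduced to two hypothetical caches:

#include <linux/slab.h>

static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

static int init_caches(void)
{
	a_cache = kmem_cache_create("a_cache", 64, 8, 0, NULL);
	if (!a_cache) {
		pr_err("kmem_cache_create() for a_cache failed\n");
		goto out;
	}
	b_cache = kmem_cache_create("b_cache", 128, 8, 0, NULL);
	if (!b_cache) {
		pr_err("kmem_cache_create() for b_cache failed\n");
		goto out;
	}
	return 0;
out:
	/* Destroy only what was successfully created. */
	if (a_cache)
		kmem_cache_destroy(a_cache);
	return -ENOMEM;
}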
@@ -366,19 +240,19 @@ static int transport_subsystem_reqmods(void)
366 240
367 ret = request_module("target_core_iblock"); 241 ret = request_module("target_core_iblock");
368 if (ret != 0) 242 if (ret != 0)
369 printk(KERN_ERR "Unable to load target_core_iblock\n"); 243 pr_err("Unable to load target_core_iblock\n");
370 244
371 ret = request_module("target_core_file"); 245 ret = request_module("target_core_file");
372 if (ret != 0) 246 if (ret != 0)
373 printk(KERN_ERR "Unable to load target_core_file\n"); 247 pr_err("Unable to load target_core_file\n");
374 248
375 ret = request_module("target_core_pscsi"); 249 ret = request_module("target_core_pscsi");
376 if (ret != 0) 250 if (ret != 0)
377 printk(KERN_ERR "Unable to load target_core_pscsi\n"); 251 pr_err("Unable to load target_core_pscsi\n");
378 252
379 ret = request_module("target_core_stgt"); 253 ret = request_module("target_core_stgt");
380 if (ret != 0) 254 if (ret != 0)
381 printk(KERN_ERR "Unable to load target_core_stgt\n"); 255 pr_err("Unable to load target_core_stgt\n");
382 256
383 return 0; 257 return 0;
384} 258}
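transport_subsystem_reqmods() treats backend loading as best effort: a failed request_module() is logged but never propagated, since a given setup only needs the backends it actually configures. The idiom in isolation:

#include <linux/kmod.h>

static void load_optional_backend(const char *name)
{
	/*
	 * request_module() returns nonzero when the module cannot be
	 * loaded; report it and keep going, as the caller above does
	 * for each backend in turn.
	 */
	if (request_module("%s", name) != 0)
		pr_err("Unable to load %s\n", name);
}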
@@ -405,8 +279,8 @@ struct se_session *transport_init_session(void)
405 struct se_session *se_sess; 279 struct se_session *se_sess;
406 280
407 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 281 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
408 if (!(se_sess)) { 282 if (!se_sess) {
409 printk(KERN_ERR "Unable to allocate struct se_session from" 283 pr_err("Unable to allocate struct se_session from"
410 " se_sess_cache\n"); 284 " se_sess_cache\n");
411 return ERR_PTR(-ENOMEM); 285 return ERR_PTR(-ENOMEM);
412 } 286 }
@@ -460,7 +334,7 @@ void __transport_register_session(
460 } 334 }
461 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 335 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
462 336
463 printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 337 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
464 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 338 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
465} 339}
466EXPORT_SYMBOL(__transport_register_session); 340EXPORT_SYMBOL(__transport_register_session);
@@ -485,7 +359,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
485 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 359 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
486 */ 360 */
487 se_nacl = se_sess->se_node_acl; 361 se_nacl = se_sess->se_node_acl;
488 if ((se_nacl)) { 362 if (se_nacl) {
489 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 363 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
490 list_del(&se_sess->sess_acl_list); 364 list_del(&se_sess->sess_acl_list);
491 /* 365 /*
@@ -516,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess)
516 struct se_portal_group *se_tpg = se_sess->se_tpg; 390 struct se_portal_group *se_tpg = se_sess->se_tpg;
517 struct se_node_acl *se_nacl; 391 struct se_node_acl *se_nacl;
518 392
519 if (!(se_tpg)) { 393 if (!se_tpg) {
520 transport_free_session(se_sess); 394 transport_free_session(se_sess);
521 return; 395 return;
522 } 396 }
@@ -532,11 +406,11 @@ void transport_deregister_session(struct se_session *se_sess)
532 * struct se_node_acl if it had been previously dynamically generated. 406 * struct se_node_acl if it had been previously dynamically generated.
533 */ 407 */
534 se_nacl = se_sess->se_node_acl; 408 se_nacl = se_sess->se_node_acl;
535 if ((se_nacl)) { 409 if (se_nacl) {
536 spin_lock_bh(&se_tpg->acl_node_lock); 410 spin_lock_bh(&se_tpg->acl_node_lock);
537 if (se_nacl->dynamic_node_acl) { 411 if (se_nacl->dynamic_node_acl) {
538 if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 412 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
539 se_tpg))) { 413 se_tpg)) {
540 list_del(&se_nacl->acl_list); 414 list_del(&se_nacl->acl_list);
541 se_tpg->num_node_acls--; 415 se_tpg->num_node_acls--;
542 spin_unlock_bh(&se_tpg->acl_node_lock); 416 spin_unlock_bh(&se_tpg->acl_node_lock);
@@ -553,7 +427,7 @@ void transport_deregister_session(struct se_session *se_sess)
553 427
554 transport_free_session(se_sess); 428 transport_free_session(se_sess);
555 429
556 printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", 430 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
557 se_tpg->se_tpg_tfo->get_fabric_name()); 431 se_tpg->se_tpg_tfo->get_fabric_name());
558} 432}
559EXPORT_SYMBOL(transport_deregister_session); 433EXPORT_SYMBOL(transport_deregister_session);
@@ -569,19 +443,19 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
569 443
570 list_for_each_entry(task, &cmd->t_task_list, t_list) { 444 list_for_each_entry(task, &cmd->t_task_list, t_list) {
571 dev = task->se_dev; 445 dev = task->se_dev;
572 if (!(dev)) 446 if (!dev)
573 continue; 447 continue;
574 448
575 if (atomic_read(&task->task_active)) 449 if (atomic_read(&task->task_active))
576 continue; 450 continue;
577 451
578 if (!(atomic_read(&task->task_state_active))) 452 if (!atomic_read(&task->task_state_active))
579 continue; 453 continue;
580 454
581 spin_lock_irqsave(&dev->execute_task_lock, flags); 455 spin_lock_irqsave(&dev->execute_task_lock, flags);
582 list_del(&task->t_state_list); 456 list_del(&task->t_state_list);
583 DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", 457 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
584 cmd->se_tfo->tfo_get_task_tag(cmd), dev, task); 458 cmd->se_tfo->get_task_tag(cmd), dev, task);
585 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 459 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
586 460
587 atomic_set(&task->task_state_active, 0); 461 atomic_set(&task->task_state_active, 0);
@@ -610,7 +484,7 @@ static int transport_cmd_check_stop(
610 * command for LUN shutdown purposes. 484 * command for LUN shutdown purposes.
611 */ 485 */
612 if (atomic_read(&cmd->transport_lun_stop)) { 486 if (atomic_read(&cmd->transport_lun_stop)) {
613 DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)" 487 pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
614 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, 488 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
615 cmd->se_tfo->get_task_tag(cmd)); 489 cmd->se_tfo->get_task_tag(cmd));
616 490
@@ -629,7 +503,7 @@ static int transport_cmd_check_stop(
629 * this command for frontend exceptions. 503 * this command for frontend exceptions.
630 */ 504 */
631 if (atomic_read(&cmd->t_transport_stop)) { 505 if (atomic_read(&cmd->t_transport_stop)) {
632 DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) ==" 506 pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
633 " TRUE for ITT: 0x%08x\n", __func__, __LINE__, 507 " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
634 cmd->se_tfo->get_task_tag(cmd)); 508 cmd->se_tfo->get_task_tag(cmd));
635 509
@@ -695,7 +569,7 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
695 return; 569 return;
696 570
697 spin_lock_irqsave(&cmd->t_state_lock, flags); 571 spin_lock_irqsave(&cmd->t_state_lock, flags);
698 if (!(atomic_read(&cmd->transport_dev_active))) { 572 if (!atomic_read(&cmd->transport_dev_active)) {
699 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 573 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
700 goto check_lun; 574 goto check_lun;
701 } 575 }
@@ -710,7 +584,7 @@ check_lun:
710 list_del(&cmd->se_lun_node); 584 list_del(&cmd->se_lun_node);
711 atomic_set(&cmd->transport_lun_active, 0); 585 atomic_set(&cmd->transport_lun_active, 0);
712#if 0 586#if 0
713 printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" 587 pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n"
714 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); 588 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
715#endif 589#endif
716 } 590 }
@@ -797,7 +671,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
797 unsigned long flags; 671 unsigned long flags;
798 672
799 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 673 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
800 if (!(atomic_read(&cmd->t_transport_queue_active))) { 674 if (!atomic_read(&cmd->t_transport_queue_active)) {
801 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 675 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
802 return; 676 return;
803 } 677 }
@@ -812,7 +686,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
812 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 686 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
813 687
814 if (atomic_read(&cmd->t_transport_queue_active)) { 688 if (atomic_read(&cmd->t_transport_queue_active)) {
815 printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", 689 pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
816 cmd->se_tfo->get_task_tag(cmd), 690 cmd->se_tfo->get_task_tag(cmd),
817 atomic_read(&cmd->t_transport_queue_active)); 691 atomic_read(&cmd->t_transport_queue_active));
818 } 692 }
@@ -853,7 +727,7 @@ void transport_complete_task(struct se_task *task, int success)
853 int t_state; 727 int t_state;
854 unsigned long flags; 728 unsigned long flags;
855#if 0 729#if 0
856 printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, 730 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
857 cmd->t_task_cdb[0], dev); 731 cmd->t_task_cdb[0], dev);
858#endif 732#endif
859 if (dev) 733 if (dev)
@@ -899,8 +773,8 @@ void transport_complete_task(struct se_task *task, int success)
899 * the processing thread. 773 * the processing thread.
900 */ 774 */
901 if (atomic_read(&task->task_timeout)) { 775 if (atomic_read(&task->task_timeout)) {
902 if (!(atomic_dec_and_test( 776 if (!atomic_dec_and_test(
903 &cmd->t_task_cdbs_timeout_left))) { 777 &cmd->t_task_cdbs_timeout_left)) {
904 spin_unlock_irqrestore(&cmd->t_state_lock, 778 spin_unlock_irqrestore(&cmd->t_state_lock,
905 flags); 779 flags);
906 return; 780 return;
@@ -918,7 +792,7 @@ void transport_complete_task(struct se_task *task, int success)
918 * struct se_task from struct se_cmd will complete itself into the 792 * struct se_task from struct se_cmd will complete itself into the
919 * device queue depending upon int success. 793 * device queue depending upon int success.
920 */ 794 */
921 if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { 795 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
922 if (!success) 796 if (!success)
923 cmd->t_tasks_failed = 1; 797 cmd->t_tasks_failed = 1;
924 798
@@ -976,9 +850,9 @@ static inline int transport_add_task_check_sam_attr(
976 &task_prev->t_execute_list : 850 &task_prev->t_execute_list :
977 &dev->execute_task_list); 851 &dev->execute_task_list);
978 852
979 DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" 853 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
980 " in execution queue\n", 854 " in execution queue\n",
981 T_TASK(task->task_se_cmd)->t_task_cdb[0]); 855 task->task_se_cmd->t_task_cdb[0]);
982 return 1; 856 return 1;
983 } 857 }
984 /* 858 /*
@@ -1020,7 +894,7 @@ static void __transport_add_task_to_execute_queue(
1020 894
1021 atomic_set(&task->task_state_active, 1); 895 atomic_set(&task->task_state_active, 1);
1022 896
1023 DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", 897 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
1024 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), 898 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
1025 task, dev); 899 task, dev);
1026} 900}
@@ -1042,8 +916,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
1042 list_add_tail(&task->t_state_list, &dev->state_task_list); 916 list_add_tail(&task->t_state_list, &dev->state_task_list);
1043 atomic_set(&task->task_state_active, 1); 917 atomic_set(&task->task_state_active, 1);
1044 918
1045 DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", 919 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
1046 task->se_cmd->se_tfo->get_task_tag( 920 task->task_se_cmd->se_tfo->get_task_tag(
1047 task->task_se_cmd), task, dev); 921 task->task_se_cmd), task, dev);
1048 922
1049 spin_unlock(&dev->execute_task_lock); 923 spin_unlock(&dev->execute_task_lock);
@@ -1112,7 +986,7 @@ static void target_qf_do_work(struct work_struct *work)
1112 smp_mb__after_atomic_dec(); 986 smp_mb__after_atomic_dec();
1113 spin_unlock_irq(&dev->qf_cmd_lock); 987 spin_unlock_irq(&dev->qf_cmd_lock);
1114 988
1115 printk(KERN_INFO "Processing %s cmd: %p QUEUE_FULL in work queue" 989 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
1116 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 990 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
1117 (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : 991 (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
1118 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" 992 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
@@ -1197,7 +1071,7 @@ static void transport_release_all_cmds(struct se_device *dev)
1197 spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, 1071 spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
1198 flags); 1072 flags);
1199 1073
1200 printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," 1074 pr_err("Releasing ITT: 0x%08x, i_state: %u,"
1201 " t_state: %u directly\n", 1075 " t_state: %u directly\n",
1202 cmd->se_tfo->get_task_tag(cmd), 1076 cmd->se_tfo->get_task_tag(cmd),
1203 cmd->se_tfo->get_cmd_state(cmd), t_state); 1077 cmd->se_tfo->get_cmd_state(cmd), t_state);
@@ -1264,7 +1138,7 @@ void transport_dump_vpd_proto_id(
1264 if (p_buf) 1138 if (p_buf)
1265 strncpy(p_buf, buf, p_buf_len); 1139 strncpy(p_buf, buf, p_buf_len);
1266 else 1140 else
1267 printk(KERN_INFO "%s", buf); 1141 pr_debug("%s", buf);
1268} 1142}
1269 1143
1270void 1144void
@@ -1314,7 +1188,7 @@ int transport_dump_vpd_assoc(
1314 if (p_buf) 1188 if (p_buf)
1315 strncpy(p_buf, buf, p_buf_len); 1189 strncpy(p_buf, buf, p_buf_len);
1316 else 1190 else
1317 printk("%s", buf); 1191 pr_debug("%s", buf);
1318 1192
1319 return ret; 1193 return ret;
1320} 1194}
@@ -1374,7 +1248,7 @@ int transport_dump_vpd_ident_type(
1374 return -EINVAL; 1248 return -EINVAL;
1375 strncpy(p_buf, buf, p_buf_len); 1249 strncpy(p_buf, buf, p_buf_len);
1376 } else { 1250 } else {
1377 printk("%s", buf); 1251 pr_debug("%s", buf);
1378 } 1252 }
1379 1253
1380 return ret; 1254 return ret;
@@ -1425,7 +1299,7 @@ int transport_dump_vpd_ident(
1425 if (p_buf) 1299 if (p_buf)
1426 strncpy(p_buf, buf, p_buf_len); 1300 strncpy(p_buf, buf, p_buf_len);
1427 else 1301 else
1428 printk("%s", buf); 1302 pr_debug("%s", buf);
1429 1303
1430 return ret; 1304 return ret;
1431} 1305}
@@ -1482,7 +1356,7 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
1482 } 1356 }
1483 1357
1484 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; 1358 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1485 DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" 1359 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1486 " device\n", dev->transport->name, 1360 " device\n", dev->transport->name,
1487 dev->transport->get_device_rev(dev)); 1361 dev->transport->get_device_rev(dev));
1488} 1362}
@@ -1494,32 +1368,32 @@ static void scsi_dump_inquiry(struct se_device *dev)
1494 /* 1368 /*
1495 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1369 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1496 */ 1370 */
1497 printk(" Vendor: "); 1371 pr_debug(" Vendor: ");
1498 for (i = 0; i < 8; i++) 1372 for (i = 0; i < 8; i++)
1499 if (wwn->vendor[i] >= 0x20) 1373 if (wwn->vendor[i] >= 0x20)
1500 printk("%c", wwn->vendor[i]); 1374 pr_debug("%c", wwn->vendor[i]);
1501 else 1375 else
1502 printk(" "); 1376 pr_debug(" ");
1503 1377
1504 printk(" Model: "); 1378 pr_debug(" Model: ");
1505 for (i = 0; i < 16; i++) 1379 for (i = 0; i < 16; i++)
1506 if (wwn->model[i] >= 0x20) 1380 if (wwn->model[i] >= 0x20)
1507 printk("%c", wwn->model[i]); 1381 pr_debug("%c", wwn->model[i]);
1508 else 1382 else
1509 printk(" "); 1383 pr_debug(" ");
1510 1384
1511 printk(" Revision: "); 1385 pr_debug(" Revision: ");
1512 for (i = 0; i < 4; i++) 1386 for (i = 0; i < 4; i++)
1513 if (wwn->revision[i] >= 0x20) 1387 if (wwn->revision[i] >= 0x20)
1514 printk("%c", wwn->revision[i]); 1388 pr_debug("%c", wwn->revision[i]);
1515 else 1389 else
1516 printk(" "); 1390 pr_debug(" ");
1517 1391
1518 printk("\n"); 1392 pr_debug("\n");
1519 1393
1520 device_type = dev->transport->get_device_type(dev); 1394 device_type = dev->transport->get_device_type(dev);
1521 printk(" Type: %s ", scsi_device_type(device_type)); 1395 pr_debug(" Type: %s ", scsi_device_type(device_type));
1522 printk(" ANSI SCSI revision: %02x\n", 1396 pr_debug(" ANSI SCSI revision: %02x\n",
1523 dev->transport->get_device_rev(dev)); 1397 dev->transport->get_device_rev(dev));
1524} 1398}
1525 1399
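One behavioral caveat with the scsi_dump_inquiry() hunk above: the original printk() calls carried no KERN_ level and so continued the previous record, while each pr_debug() is an independent callsite, meaning the character-at-a-time INQUIRY dump can now be split across log records (and each fragment must be enabled separately under dynamic debug). A hedged sketch of the explicit-continuation alternative; the struct t10_wwn layout is assumed from the target core headers:

    /* Sketch only: one pr_debug() to open the record, pr_cont() for
     * the continuation fragments, instead of a pr_debug() per byte. */
    static void dump_vendor(const struct t10_wwn *wwn)
    {
        int i;

        pr_debug("  Vendor: ");
        for (i = 0; i < 8; i++)
            pr_cont("%c", wwn->vendor[i] >= 0x20 ? wwn->vendor[i] : ' ');
        pr_cont("\n");
    }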
@@ -1537,8 +1411,8 @@ struct se_device *transport_add_device_to_core_hba(
1537 struct se_device *dev; 1411 struct se_device *dev;
1538 1412
1539 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); 1413 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1540 if (!(dev)) { 1414 if (!dev) {
1541 printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); 1415 pr_err("Unable to allocate memory for se_dev_t\n");
1542 return NULL; 1416 return NULL;
1543 } 1417 }
1544 1418
@@ -1608,7 +1482,7 @@ struct se_device *transport_add_device_to_core_hba(
1608 dev->process_thread = kthread_run(transport_processing_thread, dev, 1482 dev->process_thread = kthread_run(transport_processing_thread, dev,
1609 "LIO_%s", dev->transport->name); 1483 "LIO_%s", dev->transport->name);
1610 if (IS_ERR(dev->process_thread)) { 1484 if (IS_ERR(dev->process_thread)) {
1611 printk(KERN_ERR "Unable to create kthread: LIO_%s\n", 1485 pr_err("Unable to create kthread: LIO_%s\n",
1612 dev->transport->name); 1486 dev->transport->name);
1613 goto out; 1487 goto out;
1614 } 1488 }
@@ -1626,7 +1500,7 @@ struct se_device *transport_add_device_to_core_hba(
1626 */ 1500 */
1627 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1501 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1628 if (!inquiry_prod || !inquiry_rev) { 1502 if (!inquiry_prod || !inquiry_rev) {
1629 printk(KERN_ERR "All non TCM/pSCSI plugins require" 1503 pr_err("All non TCM/pSCSI plugins require"
1630 " INQUIRY consts\n"); 1504 " INQUIRY consts\n");
1631 goto out; 1505 goto out;
1632 } 1506 }
@@ -1688,9 +1562,9 @@ transport_generic_get_task(struct se_cmd *cmd,
1688 struct se_task *task; 1562 struct se_task *task;
1689 struct se_device *dev = cmd->se_dev; 1563 struct se_device *dev = cmd->se_dev;
1690 1564
1691 task = dev->transport->alloc_task(cmd); 1565 task = dev->transport->alloc_task(cmd->t_task_cdb);
1692 if (!task) { 1566 if (!task) {
1693 printk(KERN_ERR "Unable to allocate struct se_task\n"); 1567 pr_err("Unable to allocate struct se_task\n");
1694 return NULL; 1568 return NULL;
1695 } 1569 }
1696 1570
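As the transport_generic_get_task() hunk above shows, the alloc_task() backend hook now receives the CDB pointer rather than the whole struct se_cmd; backends only ever inspected the CDB bytes when sizing per-task state. An illustrative backend implementation under the new signature; the wrapper type and names are hypothetical (kzalloc() comes from <linux/slab.h>):

    /* Hypothetical backend task wrapper embedding struct se_task. */
    struct example_task {
        struct se_task task;
        unsigned char cdb0;    /* stashed opcode, purely illustrative */
    };

    static struct se_task *example_alloc_task(unsigned char *cdb)
    {
        struct example_task *et;

        et = kzalloc(sizeof(*et), GFP_KERNEL);
        if (!et)
            return NULL;

        et->cdb0 = cdb[0];
        return &et->task;
    }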
@@ -1751,7 +1625,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1751 return 0; 1625 return 0;
1752 1626
1753 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1627 if (cmd->sam_task_attr == MSG_ACA_TAG) {
1754 DEBUG_STA("SAM Task Attribute ACA" 1628 pr_debug("SAM Task Attribute ACA"
1755 " emulation is not supported\n"); 1629 " emulation is not supported\n");
1756 return -EINVAL; 1630 return -EINVAL;
1757 } 1631 }
@@ -1761,9 +1635,9 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1761 */ 1635 */
1762 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); 1636 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1763 smp_mb__after_atomic_inc(); 1637 smp_mb__after_atomic_inc();
1764 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1638 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1765 cmd->se_ordered_id, cmd->sam_task_attr, 1639 cmd->se_ordered_id, cmd->sam_task_attr,
1766 TRANSPORT(cmd->se_dev)->name); 1640 cmd->se_dev->transport->name);
1767 return 0; 1641 return 0;
1768} 1642}
1769 1643
@@ -1804,7 +1678,7 @@ int transport_generic_allocate_tasks(
1804 * for VARIABLE_LENGTH_CMD 1678 * for VARIABLE_LENGTH_CMD
1805 */ 1679 */
1806 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1680 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1807 printk(KERN_ERR "Received SCSI CDB with command_size: %d that" 1681 pr_err("Received SCSI CDB with command_size: %d that"
1808 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1682 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1809 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1683 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1810 return -EINVAL; 1684 return -EINVAL;
@@ -1817,8 +1691,8 @@ int transport_generic_allocate_tasks(
1817 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1691 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1818 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1692 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1819 GFP_KERNEL); 1693 GFP_KERNEL);
1820 if (!(cmd->t_task_cdb)) { 1694 if (!cmd->t_task_cdb) {
1821 printk(KERN_ERR "Unable to allocate cmd->t_task_cdb" 1695 pr_err("Unable to allocate cmd->t_task_cdb"
1822 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1696 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1823 scsi_command_size(cdb), 1697 scsi_command_size(cdb),
1824 (unsigned long)sizeof(cmd->__t_task_cdb)); 1698 (unsigned long)sizeof(cmd->__t_task_cdb));
@@ -1864,7 +1738,7 @@ int transport_generic_handle_cdb(
1864{ 1738{
1865 if (!cmd->se_lun) { 1739 if (!cmd->se_lun) {
1866 dump_stack(); 1740 dump_stack();
1867 printk(KERN_ERR "cmd->se_lun is NULL\n"); 1741 pr_err("cmd->se_lun is NULL\n");
1868 return -EINVAL; 1742 return -EINVAL;
1869 } 1743 }
1870 1744
@@ -1882,12 +1756,12 @@ int transport_handle_cdb_direct(
1882{ 1756{
1883 if (!cmd->se_lun) { 1757 if (!cmd->se_lun) {
1884 dump_stack(); 1758 dump_stack();
1885 printk(KERN_ERR "cmd->se_lun is NULL\n"); 1759 pr_err("cmd->se_lun is NULL\n");
1886 return -EINVAL; 1760 return -EINVAL;
1887 } 1761 }
1888 if (in_interrupt()) { 1762 if (in_interrupt()) {
1889 dump_stack(); 1763 dump_stack();
1890 printk(KERN_ERR "transport_generic_handle_cdb cannot be called" 1764 pr_err("transport_generic_handle_cdb cannot be called"
1891 " from interrupt context\n"); 1765 " from interrupt context\n");
1892 return -EINVAL; 1766 return -EINVAL;
1893 } 1767 }
@@ -1906,7 +1780,7 @@ int transport_generic_handle_cdb_map(
1906{ 1780{
1907 if (!cmd->se_lun) { 1781 if (!cmd->se_lun) {
1908 dump_stack(); 1782 dump_stack();
1909 printk(KERN_ERR "cmd->se_lun is NULL\n"); 1783 pr_err("cmd->se_lun is NULL\n");
1910 return -EINVAL; 1784 return -EINVAL;
1911 } 1785 }
1912 1786
@@ -1975,7 +1849,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1975 unsigned long flags; 1849 unsigned long flags;
1976 int ret = 0; 1850 int ret = 0;
1977 1851
1978 DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", 1852 pr_debug("ITT[0x%08x] - Stopping tasks\n",
1979 cmd->se_tfo->get_task_tag(cmd)); 1853 cmd->se_tfo->get_task_tag(cmd));
1980 1854
1981 /* 1855 /*
@@ -1984,7 +1858,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1984 spin_lock_irqsave(&cmd->t_state_lock, flags); 1858 spin_lock_irqsave(&cmd->t_state_lock, flags);
1985 list_for_each_entry_safe(task, task_tmp, 1859 list_for_each_entry_safe(task, task_tmp,
1986 &cmd->t_task_list, t_list) { 1860 &cmd->t_task_list, t_list) {
1987 DEBUG_TS("task_no[%d] - Processing task %p\n", 1861 pr_debug("task_no[%d] - Processing task %p\n",
1988 task->task_no, task); 1862 task->task_no, task);
1989 /* 1863 /*
1990 * If the struct se_task has not been sent and is not active, 1864 * If the struct se_task has not been sent and is not active,
@@ -1997,7 +1871,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1997 transport_remove_task_from_execute_queue(task, 1871 transport_remove_task_from_execute_queue(task,
1998 task->se_dev); 1872 task->se_dev);
1999 1873
2000 DEBUG_TS("task_no[%d] - Removed from execute queue\n", 1874 pr_debug("task_no[%d] - Removed from execute queue\n",
2001 task->task_no); 1875 task->task_no);
2002 spin_lock_irqsave(&cmd->t_state_lock, flags); 1876 spin_lock_irqsave(&cmd->t_state_lock, flags);
2003 continue; 1877 continue;
@@ -2012,10 +1886,10 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
2012 spin_unlock_irqrestore(&cmd->t_state_lock, 1886 spin_unlock_irqrestore(&cmd->t_state_lock,
2013 flags); 1887 flags);
2014 1888
2015 DEBUG_TS("task_no[%d] - Waiting to complete\n", 1889 pr_debug("task_no[%d] - Waiting to complete\n",
2016 task->task_no); 1890 task->task_no);
2017 wait_for_completion(&task->task_stop_comp); 1891 wait_for_completion(&task->task_stop_comp);
2018 DEBUG_TS("task_no[%d] - Stopped successfully\n", 1892 pr_debug("task_no[%d] - Stopped successfully\n",
2019 task->task_no); 1893 task->task_no);
2020 1894
2021 spin_lock_irqsave(&cmd->t_state_lock, flags); 1895 spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -2024,7 +1898,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
2024 atomic_set(&task->task_active, 0); 1898 atomic_set(&task->task_active, 0);
2025 atomic_set(&task->task_stop, 0); 1899 atomic_set(&task->task_stop, 0);
2026 } else { 1900 } else {
2027 DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); 1901 pr_debug("task_no[%d] - Did nothing\n", task->task_no);
2028 ret++; 1902 ret++;
2029 } 1903 }
2030 1904
@@ -2046,18 +1920,18 @@ static void transport_generic_request_failure(
2046{ 1920{
2047 int ret = 0; 1921 int ret = 0;
2048 1922
2049 DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1923 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
2050 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1924 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
2051 cmd->t_task_cdb[0]); 1925 cmd->t_task_cdb[0]);
2052 DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" 1926 pr_debug("-----[ i_state: %d t_state/def_t_state:"
2053 " %d/%d transport_error_status: %d\n", 1927 " %d/%d transport_error_status: %d\n",
2054 cmd->se_tfo->get_cmd_state(cmd), 1928 cmd->se_tfo->get_cmd_state(cmd),
2055 cmd->t_state, cmd->deferred_t_state, 1929 cmd->t_state, cmd->deferred_t_state,
2056 cmd->transport_error_status); 1930 cmd->transport_error_status);
2057 DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" 1931 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
2058 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" 1932 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
2059 " t_transport_active: %d t_transport_stop: %d" 1933 " t_transport_active: %d t_transport_stop: %d"
2060 " t_transport_sent: %d\n", cmd->t_task_cdbs, 1934 " t_transport_sent: %d\n", cmd->t_task_list_num,
2061 atomic_read(&cmd->t_task_cdbs_left), 1935 atomic_read(&cmd->t_task_cdbs_left),
2062 atomic_read(&cmd->t_task_cdbs_sent), 1936 atomic_read(&cmd->t_task_cdbs_sent),
2063 atomic_read(&cmd->t_task_cdbs_ex_left), 1937 atomic_read(&cmd->t_task_cdbs_ex_left),
@@ -2146,7 +2020,7 @@ static void transport_generic_request_failure(
2146 */ 2020 */
2147 break; 2021 break;
2148 default: 2022 default:
2149 printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", 2023 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
2150 cmd->t_task_cdb[0], 2024 cmd->t_task_cdb[0],
2151 cmd->transport_error_status); 2025 cmd->transport_error_status);
2152 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2026 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -2164,7 +2038,7 @@ static void transport_generic_request_failure(
2164 2038
2165check_stop: 2039check_stop:
2166 transport_lun_remove_cmd(cmd); 2040 transport_lun_remove_cmd(cmd);
2167 if (!(transport_cmd_check_stop_to_fabric(cmd))) 2041 if (!transport_cmd_check_stop_to_fabric(cmd))
2168 ; 2042 ;
2169 return; 2043 return;
2170 2044
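The lone semicolon retained above makes the if-branch an empty statement: the conditional exists only for the side effects of transport_cmd_check_stop_to_fabric(), so after this cleanup the construct is still equivalent to calling it unconditionally and discarding the result:

    /* Equivalent form, side effects only: */
    transport_cmd_check_stop_to_fabric(cmd);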
@@ -2178,7 +2052,7 @@ static void transport_direct_request_timeout(struct se_cmd *cmd)
2178 unsigned long flags; 2052 unsigned long flags;
2179 2053
2180 spin_lock_irqsave(&cmd->t_state_lock, flags); 2054 spin_lock_irqsave(&cmd->t_state_lock, flags);
2181 if (!(atomic_read(&cmd->t_transport_timeout))) { 2055 if (!atomic_read(&cmd->t_transport_timeout)) {
2182 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2056 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2183 return; 2057 return;
2184 } 2058 }
@@ -2262,7 +2136,7 @@ static void transport_task_timeout_handler(unsigned long data)
2262 struct se_cmd *cmd = task->task_se_cmd; 2136 struct se_cmd *cmd = task->task_se_cmd;
2263 unsigned long flags; 2137 unsigned long flags;
2264 2138
2265 DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); 2139 pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2266 2140
2267 spin_lock_irqsave(&cmd->t_state_lock, flags); 2141 spin_lock_irqsave(&cmd->t_state_lock, flags);
2268 if (task->task_flags & TF_STOP) { 2142 if (task->task_flags & TF_STOP) {
@@ -2274,8 +2148,8 @@ static void transport_task_timeout_handler(unsigned long data)
2274 /* 2148 /*
2275 * Determine if transport_complete_task() has already been called. 2149 * Determine if transport_complete_task() has already been called.
2276 */ 2150 */
2277 if (!(atomic_read(&task->task_active))) { 2151 if (!atomic_read(&task->task_active)) {
2278 DEBUG_TT("transport task: %p cmd: %p timeout task_active" 2152 pr_debug("transport task: %p cmd: %p timeout task_active"
2279 " == 0\n", task, cmd); 2153 " == 0\n", task, cmd);
2280 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2154 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2281 return; 2155 return;
@@ -2290,20 +2164,20 @@ static void transport_task_timeout_handler(unsigned long data)
2290 task->task_scsi_status = 1; 2164 task->task_scsi_status = 1;
2291 2165
2292 if (atomic_read(&task->task_stop)) { 2166 if (atomic_read(&task->task_stop)) {
2293 DEBUG_TT("transport task: %p cmd: %p timeout task_stop" 2167 pr_debug("transport task: %p cmd: %p timeout task_stop"
2294 " == 1\n", task, cmd); 2168 " == 1\n", task, cmd);
2295 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2169 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2296 complete(&task->task_stop_comp); 2170 complete(&task->task_stop_comp);
2297 return; 2171 return;
2298 } 2172 }
2299 2173
2300 if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { 2174 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
2301 DEBUG_TT("transport task: %p cmd: %p timeout non zero" 2175 pr_debug("transport task: %p cmd: %p timeout non zero"
2302 " t_task_cdbs_left\n", task, cmd); 2176 " t_task_cdbs_left\n", task, cmd);
2303 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2177 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2304 return; 2178 return;
2305 } 2179 }
2306 DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", 2180 pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2307 task, cmd); 2181 task, cmd);
2308 2182
2309 cmd->t_state = TRANSPORT_COMPLETE_FAILURE; 2183 cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
@@ -2326,7 +2200,7 @@ static void transport_start_task_timer(struct se_task *task)
2326 * If the task_timeout is disabled, exit now. 2200 * If the task_timeout is disabled, exit now.
2327 */ 2201 */
2328 timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; 2202 timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
2329 if (!(timeout)) 2203 if (!timeout)
2330 return; 2204 return;
2331 2205
2332 init_timer(&task->task_timer); 2206 init_timer(&task->task_timer);
@@ -2337,7 +2211,7 @@ static void transport_start_task_timer(struct se_task *task)
2337 task->task_flags |= TF_RUNNING; 2211 task->task_flags |= TF_RUNNING;
2338 add_timer(&task->task_timer); 2212 add_timer(&task->task_timer);
2339#if 0 2213#if 0
2340 printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" 2214 pr_debug("Starting task timer for cmd: %p task: %p seconds:"
2341 " %d\n", task->task_se_cmd, task, timeout); 2215 " %d\n", task->task_se_cmd, task, timeout);
2342#endif 2216#endif
2343} 2217}
@@ -2349,7 +2223,7 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2349{ 2223{
2350 struct se_cmd *cmd = task->task_se_cmd; 2224 struct se_cmd *cmd = task->task_se_cmd;
2351 2225
2352 if (!(task->task_flags & TF_RUNNING)) 2226 if (!task->task_flags & TF_RUNNING)
2353 return; 2227 return;
2354 2228
2355 task->task_flags |= TF_STOP; 2229 task->task_flags |= TF_STOP;
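Careful with this particular parenthesis removal: unlike the atomic_*() call sites, task->task_flags & TF_RUNNING is a bitwise test, and logical NOT binds tighter than binary &, so !task->task_flags & TF_RUNNING evaluates as (!task->task_flags) & TF_RUNNING. That is zero whenever any task flag is set, which inverts the early return whenever TF_RUNNING is clear but another flag is not; the parentheses were load-bearing here. A runnable demonstration, with illustrative flag values:

    #include <stdio.h>

    #define TF_RUNNING 0x01    /* illustrative values */
    #define TF_STOP    0x02

    int main(void)
    {
        unsigned int flags = TF_STOP;    /* TF_RUNNING is NOT set */

        /* Intended test: prints 1, because TF_RUNNING is clear. */
        printf("!(flags & TF_RUNNING) = %d\n", !(flags & TF_RUNNING));

        /* '!' binds first: (!flags) & TF_RUNNING, prints 0. */
        printf("!flags & TF_RUNNING   = %d\n", !flags & TF_RUNNING);
        return 0;
    }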
@@ -2404,9 +2278,9 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2404 if (cmd->sam_task_attr == MSG_HEAD_TAG) { 2278 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2405 atomic_inc(&cmd->se_dev->dev_hoq_count); 2279 atomic_inc(&cmd->se_dev->dev_hoq_count);
2406 smp_mb__after_atomic_inc(); 2280 smp_mb__after_atomic_inc();
2407 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" 2281 pr_debug("Added HEAD_OF_QUEUE for CDB:"
2408 " 0x%02x, se_ordered_id: %u\n", 2282 " 0x%02x, se_ordered_id: %u\n",
2409 cmd->_task_cdb[0], 2283 cmd->t_task_cdb[0],
2410 cmd->se_ordered_id); 2284 cmd->se_ordered_id);
2411 return 1; 2285 return 1;
2412 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 2286 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -2418,7 +2292,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2418 atomic_inc(&cmd->se_dev->dev_ordered_sync); 2292 atomic_inc(&cmd->se_dev->dev_ordered_sync);
2419 smp_mb__after_atomic_inc(); 2293 smp_mb__after_atomic_inc();
2420 2294
2421 DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" 2295 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
2422 " list, se_ordered_id: %u\n", 2296 " list, se_ordered_id: %u\n",
2423 cmd->t_task_cdb[0], 2297 cmd->t_task_cdb[0],
2424 cmd->se_ordered_id); 2298 cmd->se_ordered_id);
@@ -2427,7 +2301,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2427 * no other older commands exist that need to be 2301 * no other older commands exist that need to be
2428 * completed first. 2302 * completed first.
2429 */ 2303 */
2430 if (!(atomic_read(&cmd->se_dev->simple_cmds))) 2304 if (!atomic_read(&cmd->se_dev->simple_cmds))
2431 return 1; 2305 return 1;
2432 } else { 2306 } else {
2433 /* 2307 /*
@@ -2452,7 +2326,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2452 &cmd->se_dev->delayed_cmd_list); 2326 &cmd->se_dev->delayed_cmd_list);
2453 spin_unlock(&cmd->se_dev->delayed_cmd_lock); 2327 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
2454 2328
2455 DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" 2329 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
2456 " delayed CMD list, se_ordered_id: %u\n", 2330 " delayed CMD list, se_ordered_id: %u\n",
2457 cmd->t_task_cdb[0], cmd->sam_task_attr, 2331 cmd->t_task_cdb[0], cmd->sam_task_attr,
2458 cmd->se_ordered_id); 2332 cmd->se_ordered_id);
@@ -2486,7 +2360,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
2486 * Call transport_cmd_check_stop() to see if a fabric exception 2360 * Call transport_cmd_check_stop() to see if a fabric exception
2487 * has occurred that prevents execution. 2361 * has occurred that prevents execution.
2488 */ 2362 */
2489 if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { 2363 if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
2490 /* 2364 /*
2491 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE 2365 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2492 * attribute for the tasks of the received struct se_cmd CDB 2366 * attribute for the tasks of the received struct se_cmd CDB
@@ -2777,7 +2651,7 @@ static inline u32 transport_get_size(
2777 return sectors; 2651 return sectors;
2778 } 2652 }
2779#if 0 2653#if 0
2780 printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" 2654 pr_debug("Returning block_size: %u, sectors: %u == %u for"
2781 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, 2655 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2782 dev->se_sub_dev->se_dev_attrib.block_size * sectors, 2656 dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2783 dev->transport->name); 2657 dev->transport->name);
@@ -2832,8 +2706,8 @@ static void transport_xor_callback(struct se_cmd *cmd)
2832 * 5) transfer the resulting XOR data to the data-in buffer. 2706 * 5) transfer the resulting XOR data to the data-in buffer.
2833 */ 2707 */
2834 buf = kmalloc(cmd->data_length, GFP_KERNEL); 2708 buf = kmalloc(cmd->data_length, GFP_KERNEL);
2835 if (!(buf)) { 2709 if (!buf) {
2836 printk(KERN_ERR "Unable to allocate xor_callback buf\n"); 2710 pr_err("Unable to allocate xor_callback buf\n");
2837 return; 2711 return;
2838 } 2712 }
2839 /* 2713 /*
@@ -2893,18 +2767,18 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2893 continue; 2767 continue;
2894 2768
2895 dev = task->se_dev; 2769 dev = task->se_dev;
2896 if (!(dev)) 2770 if (!dev)
2897 continue; 2771 continue;
2898 2772
2899 if (!dev->transport->get_sense_buffer) { 2773 if (!dev->transport->get_sense_buffer) {
2900 printk(KERN_ERR "dev->transport->get_sense_buffer" 2774 pr_err("dev->transport->get_sense_buffer"
2901 " is NULL\n"); 2775 " is NULL\n");
2902 continue; 2776 continue;
2903 } 2777 }
2904 2778
2905 sense_buffer = dev->transport->get_sense_buffer(task); 2779 sense_buffer = dev->transport->get_sense_buffer(task);
2906 if (!(sense_buffer)) { 2780 if (!sense_buffer) {
2907 printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" 2781 pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
2908 " sense buffer for task with sense\n", 2782 " sense buffer for task with sense\n",
2909 cmd->se_tfo->get_task_tag(cmd), task->task_no); 2783 cmd->se_tfo->get_task_tag(cmd), task->task_no);
2910 continue; 2784 continue;
@@ -2921,7 +2795,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2921 cmd->scsi_sense_length = 2795 cmd->scsi_sense_length =
2922 (TRANSPORT_SENSE_BUFFER + offset); 2796 (TRANSPORT_SENSE_BUFFER + offset);
2923 2797
2924 printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" 2798 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2925 " and sense\n", 2799 " and sense\n",
2926 dev->se_hba->hba_id, dev->transport->name, 2800 dev->se_hba->hba_id, dev->transport->name,
2927 cmd->scsi_status); 2801 cmd->scsi_status);
@@ -2969,13 +2843,12 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2969 2843
2970 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); 2844 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2971 2845
2972 if ((cmd->t_task_lba + sectors) > 2846 if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2973 transport_dev_end_lba(dev)) { 2847 pr_err("LBA: %llu Sectors: %u exceeds"
2974 printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
2975 " transport_dev_end_lba(): %llu\n", 2848 " transport_dev_end_lba(): %llu\n",
2976 cmd->t_task_lba, sectors, 2849 cmd->t_task_lba, sectors,
2977 transport_dev_end_lba(dev)); 2850 transport_dev_end_lba(dev));
2978 printk(KERN_ERR " We should return CHECK_CONDITION" 2851 pr_err(" We should return CHECK_CONDITION"
2979 " but we don't yet\n"); 2852 " but we don't yet\n");
2980 return 0; 2853 return 0;
2981 } 2854 }
@@ -3026,7 +2899,7 @@ static int transport_generic_cmd_sequencer(
3026 */ 2899 */
3027 if (ret > 0) { 2900 if (ret > 0) {
3028#if 0 2901#if 0
3029 printk(KERN_INFO "[%s]: ALUA TG Port not available," 2902 pr_debug("[%s]: ALUA TG Port not available,"
3030 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", 2903 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
3031 cmd->se_tfo->get_fabric_name(), alua_ascq); 2904 cmd->se_tfo->get_fabric_name(), alua_ascq);
3032#endif 2905#endif
@@ -3192,10 +3065,13 @@ static int transport_generic_cmd_sequencer(
3192 if (sector_ret) 3065 if (sector_ret)
3193 goto out_unsupported_cdb; 3066 goto out_unsupported_cdb;
3194 3067
3195 if (sectors != 0) 3068 if (sectors)
3196 size = transport_get_size(sectors, cdb, cmd); 3069 size = transport_get_size(sectors, cdb, cmd);
3197 else 3070 else {
3198 size = dev->se_sub_dev->se_dev_attrib.block_size; 3071 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
3072 " supported\n");
3073 goto out_invalid_cdb_field;
3074 }
3199 3075
3200 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 3076 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
3201 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3077 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
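This hunk, and the matching one for the other WRITE_SAME opcode further down, rejects a zero-length WRITE_SAME outright: once the WRITE SAME NON-ZERO (WSNZ) bit is advertised in the Block Limits VPD page, a NUMBER OF LOGICAL BLOCKS of zero no longer means "write through the last LBA" and must instead fail as an invalid CDB field. A minimal sketch of the check; the helper name is illustrative, not the exact core code:

    #include <errno.h>

    /* With WSNZ=1 reported to initiators, sectors == 0 is an invalid
     * CDB field rather than a request spanning the rest of the medium. */
    static int check_write_same_sectors(unsigned int sectors)
    {
        if (!sectors)
            return -EINVAL;    /* CHECK CONDITION, INVALID FIELD IN CDB */
        return 0;
    }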
@@ -3207,7 +3083,7 @@ static int transport_generic_cmd_sequencer(
3207 break; 3083 break;
3208 3084
3209 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { 3085 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3210 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" 3086 pr_err("WRITE_SAME PBDATA and LBDATA"
3211 " bits not supported for Block Discard" 3087 " bits not supported for Block Discard"
3212 " Emulation\n"); 3088 " Emulation\n");
3213 goto out_invalid_cdb_field; 3089 goto out_invalid_cdb_field;
@@ -3217,13 +3093,13 @@ static int transport_generic_cmd_sequencer(
3217 * tpws with the UNMAP=1 bit set. 3093 * tpws with the UNMAP=1 bit set.
3218 */ 3094 */
3219 if (!(cdb[10] & 0x08)) { 3095 if (!(cdb[10] & 0x08)) {
3220 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" 3096 pr_err("WRITE_SAME w/o UNMAP bit not"
3221 " supported for Block Discard Emulation\n"); 3097 " supported for Block Discard Emulation\n");
3222 goto out_invalid_cdb_field; 3098 goto out_invalid_cdb_field;
3223 } 3099 }
3224 break; 3100 break;
3225 default: 3101 default:
3226 printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" 3102 pr_err("VARIABLE_LENGTH_CMD service action"
3227 " 0x%04x not supported\n", service_action); 3103 " 0x%04x not supported\n", service_action);
3228 goto out_unsupported_cdb; 3104 goto out_unsupported_cdb;
3229 } 3105 }
@@ -3469,10 +3345,12 @@ static int transport_generic_cmd_sequencer(
3469 if (sector_ret) 3345 if (sector_ret)
3470 goto out_unsupported_cdb; 3346 goto out_unsupported_cdb;
3471 3347
3472 if (sectors != 0) 3348 if (sectors)
3473 size = transport_get_size(sectors, cdb, cmd); 3349 size = transport_get_size(sectors, cdb, cmd);
3474 else 3350 else {
3475 size = dev->se_sub_dev->se_dev_attrib.block_size; 3351 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3352 goto out_invalid_cdb_field;
3353 }
3476 3354
3477 cmd->t_task_lba = get_unaligned_be16(&cdb[2]); 3355 cmd->t_task_lba = get_unaligned_be16(&cdb[2]);
3478 passthrough = (dev->transport->transport_type == 3356 passthrough = (dev->transport->transport_type ==
@@ -3484,9 +3362,9 @@ static int transport_generic_cmd_sequencer(
3484 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and 3362 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and
3485 * TCM/FILEIO subsystem plugin backstores. 3363 * TCM/FILEIO subsystem plugin backstores.
3486 */ 3364 */
3487 if (!(passthrough)) { 3365 if (!passthrough) {
3488 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { 3366 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
3489 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" 3367 pr_err("WRITE_SAME PBDATA and LBDATA"
3490 " bits not supported for Block Discard" 3368 " bits not supported for Block Discard"
3491 " Emulation\n"); 3369 " Emulation\n");
3492 goto out_invalid_cdb_field; 3370 goto out_invalid_cdb_field;
@@ -3496,7 +3374,7 @@ static int transport_generic_cmd_sequencer(
3496 * tpws with the UNMAP=1 bit set. 3374 * tpws with the UNMAP=1 bit set.
3497 */ 3375 */
3498 if (!(cdb[1] & 0x08)) { 3376 if (!(cdb[1] & 0x08)) {
3499 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not " 3377 pr_err("WRITE_SAME w/o UNMAP bit not "
3500 " supported for Block Discard Emulation\n"); 3378 " supported for Block Discard Emulation\n");
3501 goto out_invalid_cdb_field; 3379 goto out_invalid_cdb_field;
3502 } 3380 }
@@ -3532,7 +3410,7 @@ static int transport_generic_cmd_sequencer(
3532 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3410 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3533 break; 3411 break;
3534 default: 3412 default:
3535 printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" 3413 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3536 " 0x%02x, sending CHECK_CONDITION.\n", 3414 " 0x%02x, sending CHECK_CONDITION.\n",
3537 cmd->se_tfo->get_fabric_name(), cdb[0]); 3415 cmd->se_tfo->get_fabric_name(), cdb[0]);
3538 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; 3416 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
@@ -3540,7 +3418,7 @@ static int transport_generic_cmd_sequencer(
3540 } 3418 }
3541 3419
3542 if (size != cmd->data_length) { 3420 if (size != cmd->data_length) {
3543 printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" 3421 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3544 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 3422 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3545 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 3423 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3546 cmd->data_length, size, cdb[0]); 3424 cmd->data_length, size, cdb[0]);
@@ -3548,7 +3426,7 @@ static int transport_generic_cmd_sequencer(
3548 cmd->cmd_spdtl = size; 3426 cmd->cmd_spdtl = size;
3549 3427
3550 if (cmd->data_direction == DMA_TO_DEVICE) { 3428 if (cmd->data_direction == DMA_TO_DEVICE) {
3551 printk(KERN_ERR "Rejecting underflow/overflow" 3429 pr_err("Rejecting underflow/overflow"
3552 " WRITE data\n"); 3430 " WRITE data\n");
3553 goto out_invalid_cdb_field; 3431 goto out_invalid_cdb_field;
3554 } 3432 }
@@ -3556,8 +3434,8 @@ static int transport_generic_cmd_sequencer(
3556 * Reject READ_* or WRITE_* with overflow/underflow for 3434 * Reject READ_* or WRITE_* with overflow/underflow for
3557 * type SCF_SCSI_DATA_SG_IO_CDB. 3435 * type SCF_SCSI_DATA_SG_IO_CDB.
3558 */ 3436 */
3559 if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { 3437 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
3560 printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" 3438 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3561 " CDB on non 512-byte sector setup subsystem" 3439 " CDB on non 512-byte sector setup subsystem"
3562 " plugin: %s\n", dev->transport->name); 3440 " plugin: %s\n", dev->transport->name);
3563 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 3441 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
@@ -3607,14 +3485,14 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3607 atomic_dec(&dev->simple_cmds); 3485 atomic_dec(&dev->simple_cmds);
3608 smp_mb__after_atomic_dec(); 3486 smp_mb__after_atomic_dec();
3609 dev->dev_cur_ordered_id++; 3487 dev->dev_cur_ordered_id++;
3610 DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" 3488 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3611 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 3489 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3612 cmd->se_ordered_id); 3490 cmd->se_ordered_id);
3613 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 3491 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3614 atomic_dec(&dev->dev_hoq_count); 3492 atomic_dec(&dev->dev_hoq_count);
3615 smp_mb__after_atomic_dec(); 3493 smp_mb__after_atomic_dec();
3616 dev->dev_cur_ordered_id++; 3494 dev->dev_cur_ordered_id++;
3617 DEBUG_STA("Incremented dev_cur_ordered_id: %u for" 3495 pr_debug("Incremented dev_cur_ordered_id: %u for"
3618 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 3496 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3619 cmd->se_ordered_id); 3497 cmd->se_ordered_id);
3620 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 3498 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -3625,7 +3503,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3625 spin_unlock(&dev->ordered_cmd_lock); 3503 spin_unlock(&dev->ordered_cmd_lock);
3626 3504
3627 dev->dev_cur_ordered_id++; 3505 dev->dev_cur_ordered_id++;
3628 DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" 3506 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3629 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 3507 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3630 } 3508 }
3631 /* 3509 /*
@@ -3640,10 +3518,10 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3640 list_del(&cmd_p->se_delayed_node); 3518 list_del(&cmd_p->se_delayed_node);
3641 spin_unlock(&dev->delayed_cmd_lock); 3519 spin_unlock(&dev->delayed_cmd_lock);
3642 3520
3643 DEBUG_STA("Calling add_tasks() for" 3521 pr_debug("Calling add_tasks() for"
3644 " cmd_p: 0x%02x Task Attr: 0x%02x" 3522 " cmd_p: 0x%02x Task Attr: 0x%02x"
3645 " Dormant -> Active, se_ordered_id: %u\n", 3523 " Dormant -> Active, se_ordered_id: %u\n",
3646 T_TASK(cmd_p)->t_task_cdb[0], 3524 cmd_p->t_task_cdb[0],
3647 cmd_p->sam_task_attr, cmd_p->se_ordered_id); 3525 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3648 3526
3649 transport_add_tasks_from_cmd(cmd_p); 3527 transport_add_tasks_from_cmd(cmd_p);
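For context on the dev_cur_ordered_id and delayed-list machinery in these hunks: under SAM_TASK_ATTR_EMULATED, SIMPLE commands run freely, HEAD_OF_QUEUE commands are inserted at the front of the execution queue, and ORDERED commands must drain everything older first; commands that cannot run yet park on delayed_cmd_list and are re-added via the add_tasks() call above as earlier commands complete. A compact sketch of the dispatch policy, illustrative rather than the exact core logic (it omits, for instance, SIMPLE commands stalling behind an outstanding ORDERED):

    enum sam_attr { SIMPLE, HEAD_OF_QUEUE, ORDERED };

    /* May a newly received command execute immediately? */
    static int may_execute(enum sam_attr attr, int older_cmds_outstanding)
    {
        switch (attr) {
        case HEAD_OF_QUEUE:
            return 1;                       /* jumps the queue */
        case ORDERED:
            return !older_cmds_outstanding; /* drain older commands first */
        case SIMPLE:
        default:
            return 1;                       /* no ordering constraint */
        }
    }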
@@ -3812,7 +3690,7 @@ done:
3812 return; 3690 return;
3813 3691
3814queue_full: 3692queue_full:
3815 printk(KERN_INFO "Handling complete_ok QUEUE_FULL: se_cmd: %p," 3693 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3816 " data_direction: %d\n", cmd, cmd->data_direction); 3694 " data_direction: %d\n", cmd, cmd->data_direction);
3817 transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); 3695 transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
3818} 3696}
@@ -3837,49 +3715,34 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
3837 if (task->se_dev) 3715 if (task->se_dev)
3838 task->se_dev->transport->free_task(task); 3716 task->se_dev->transport->free_task(task);
3839 else 3717 else
3840 printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", 3718 pr_err("task[%u] - task->se_dev is NULL\n",
3841 task->task_no); 3719 task->task_no);
3842 spin_lock_irqsave(&cmd->t_state_lock, flags); 3720 spin_lock_irqsave(&cmd->t_state_lock, flags);
3843 } 3721 }
3844 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3722 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3845} 3723}
3846 3724
3847static inline void transport_free_pages(struct se_cmd *cmd) 3725static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3848{ 3726{
3849 struct scatterlist *sg; 3727 struct scatterlist *sg;
3850 int free_page = 1;
3851 int count; 3728 int count;
3852 3729
3853 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 3730 for_each_sg(sgl, sg, nents, count)
3854 free_page = 0; 3731 __free_page(sg_page(sg));
3855 if (cmd->se_dev->transport->do_se_mem_map)
3856 free_page = 0;
3857 3732
3858 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) { 3733 kfree(sgl);
3859 /* 3734}
3860 * Only called if
3861 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
3862 */
3863 if (free_page)
3864 __free_page(sg_page(sg));
3865 3735
3866 } 3736static inline void transport_free_pages(struct se_cmd *cmd)
3867 if (free_page) 3737{
3868 kfree(cmd->t_data_sg); 3738 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3739 return;
3740
3741 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3869 cmd->t_data_sg = NULL; 3742 cmd->t_data_sg = NULL;
3870 cmd->t_data_nents = 0; 3743 cmd->t_data_nents = 0;
3871 3744
3872 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 3745 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3873 /*
3874 * Only called if
3875 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
3876 */
3877 if (free_page)
3878 __free_page(sg_page(sg));
3879
3880 }
3881 if (free_page)
3882 kfree(cmd->t_bidi_data_sg);
3883 cmd->t_bidi_data_sg = NULL; 3746 cmd->t_bidi_data_sg = NULL;
3884 cmd->t_bidi_data_nents = 0; 3747 cmd->t_bidi_data_nents = 0;
3885} 3748}
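Because the interleaved columns make this refactor hard to read, here is the replacement assembled from the right-hand side: the two flag-guarded free loops collapse into a transport_free_sgl() helper plus an early return for the NOALLOC passthrough case, and the do_se_mem_map test disappears because that callback is removed by this series:

    static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
    {
        struct scatterlist *sg;
        int count;

        for_each_sg(sgl, sg, nents, count)
            __free_page(sg_page(sg));

        kfree(sgl);
    }

    static inline void transport_free_pages(struct se_cmd *cmd)
    {
        if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
            return;

        transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
        cmd->t_data_sg = NULL;
        cmd->t_data_nents = 0;

        transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
        cmd->t_bidi_data_sg = NULL;
        cmd->t_bidi_data_nents = 0;
    }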
@@ -3895,7 +3758,7 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
3895 3758
3896 spin_lock_irqsave(&cmd->t_state_lock, flags); 3759 spin_lock_irqsave(&cmd->t_state_lock, flags);
3897 if (atomic_read(&cmd->t_fe_count)) { 3760 if (atomic_read(&cmd->t_fe_count)) {
3898 if (!(atomic_dec_and_test(&cmd->t_fe_count))) { 3761 if (!atomic_dec_and_test(&cmd->t_fe_count)) {
3899 spin_unlock_irqrestore(&cmd->t_state_lock, 3762 spin_unlock_irqrestore(&cmd->t_state_lock,
3900 flags); 3763 flags);
3901 return 1; 3764 return 1;
@@ -3903,7 +3766,7 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
3903 } 3766 }
3904 3767
3905 if (atomic_read(&cmd->t_se_count)) { 3768 if (atomic_read(&cmd->t_se_count)) {
3906 if (!(atomic_dec_and_test(&cmd->t_se_count))) { 3769 if (!atomic_dec_and_test(&cmd->t_se_count)) {
3907 spin_unlock_irqrestore(&cmd->t_state_lock, 3770 spin_unlock_irqrestore(&cmd->t_state_lock,
3908 flags); 3771 flags);
3909 return 1; 3772 return 1;
@@ -3922,7 +3785,7 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
3922 return; 3785 return;
3923 3786
3924 spin_lock_irqsave(&cmd->t_state_lock, flags); 3787 spin_lock_irqsave(&cmd->t_state_lock, flags);
3925 if (!(atomic_read(&cmd->transport_dev_active))) { 3788 if (!atomic_read(&cmd->transport_dev_active)) {
3926 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3789 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3927 goto free_pages; 3790 goto free_pages;
3928 } 3791 }
@@ -3953,7 +3816,7 @@ transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
3953 } 3816 }
3954 3817
3955 spin_lock_irqsave(&cmd->t_state_lock, flags); 3818 spin_lock_irqsave(&cmd->t_state_lock, flags);
3956 if (!(atomic_read(&cmd->transport_dev_active))) { 3819 if (!atomic_read(&cmd->transport_dev_active)) {
3957 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3820 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3958 goto free_pages; 3821 goto free_pages;
3959 } 3822 }
@@ -4027,7 +3890,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
4027 DMA_FROM_DEVICE, 3890 DMA_FROM_DEVICE,
4028 cmd->t_bidi_data_sg, 3891 cmd->t_bidi_data_sg,
4029 cmd->t_bidi_data_nents); 3892 cmd->t_bidi_data_nents);
4030 if (!rc) { 3893 if (rc <= 0) {
4031 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3894 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4032 cmd->scsi_sense_reason = 3895 cmd->scsi_sense_reason =
4033 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3896 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -4046,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
4046 cmd->data_direction, 3909 cmd->data_direction,
4047 cmd->t_data_sg, 3910 cmd->t_data_sg,
4048 cmd->t_data_nents); 3911 cmd->t_data_nents);
4049 if (!task_cdbs) { 3912 if (task_cdbs <= 0) {
4050 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3913 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4051 cmd->scsi_sense_reason = 3914 cmd->scsi_sense_reason =
4052 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3915 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
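The !rc to rc <= 0 changes in this function reflect the reworked task-allocation return convention: transport_allocate_tasks() now returns the number of tasks allocated on success and zero or a negative errno on failure, so both non-positive cases must be treated as errors. A runnable illustration of the convention; the allocator itself is hypothetical:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical allocator following the reworked convention:
     *  > 0  number of tasks allocated
     * <= 0  failure (zero or a negative errno) */
    static int alloc_tasks(int wanted)
    {
        if (wanted < 1)
            return -EINVAL;
        return wanted;
    }

    int main(void)
    {
        int rc = alloc_tasks(0);

        if (rc <= 0)
            fprintf(stderr, "task allocation failed: %d\n", rc);
        return 0;
    }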
@@ -4094,12 +3957,6 @@ transport_generic_get_mem(struct se_cmd *cmd)
4094 struct page *page; 3957 struct page *page;
4095 int i = 0; 3958 int i = 0;
4096 3959
4097 /*
4098 * If the device uses memory mapping this is enough.
4099 */
4100 if (cmd->se_dev->transport->do_se_mem_map)
4101 return 0;
4102
4103 nents = DIV_ROUND_UP(length, PAGE_SIZE); 3960 nents = DIV_ROUND_UP(length, PAGE_SIZE);
4104 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); 3961 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
4105 if (!cmd->t_data_sg) 3962 if (!cmd->t_data_sg)
@@ -4176,14 +4033,14 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4176 4033
4177 if (!sg_first) { 4034 if (!sg_first) {
4178 sg_first = task->task_sg; 4035 sg_first = task->task_sg;
4179 chained_nents = task->task_sg_num; 4036 chained_nents = task->task_sg_nents;
4180 } else { 4037 } else {
4181 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 4038 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
4182 chained_nents += task->task_sg_num; 4039 chained_nents += task->task_sg_nents;
4183 } 4040 }
4184 4041
4185 sg_prev = task->task_sg; 4042 sg_prev = task->task_sg;
4186 sg_prev_nents = task->task_sg_num; 4043 sg_prev_nents = task->task_sg_nents;
4187 } 4044 }
4188 /* 4045 /*
4189 * Setup the starting pointer and total t_tasks_sg_linked_no including 4046 * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4192,19 +4049,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4192 cmd->t_tasks_sg_chained = sg_first; 4049 cmd->t_tasks_sg_chained = sg_first;
4193 cmd->t_tasks_sg_chained_no = chained_nents; 4050 cmd->t_tasks_sg_chained_no = chained_nents;
4194 4051
4195 DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" 4052 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
4196 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, 4053 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
4197 cmd->t_tasks_sg_chained_no); 4054 cmd->t_tasks_sg_chained_no);
4198 4055
4199 for_each_sg(cmd->t_tasks_sg_chained, sg, 4056 for_each_sg(cmd->t_tasks_sg_chained, sg,
4200 cmd->t_tasks_sg_chained_no, i) { 4057 cmd->t_tasks_sg_chained_no, i) {
4201 4058
4202 DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n", 4059 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
4203 i, sg, sg_page(sg), sg->length, sg->offset); 4060 i, sg, sg_page(sg), sg->length, sg->offset);
4204 if (sg_is_chain(sg)) 4061 if (sg_is_chain(sg))
4205 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); 4062 pr_debug("SG: %p sg_is_chain=1\n", sg);
4206 if (sg_is_last(sg)) 4063 if (sg_is_last(sg))
4207 DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); 4064 pr_debug("SG: %p sg_is_last=1\n", sg);
4208 } 4065 }
4209} 4066}
4210EXPORT_SYMBOL(transport_do_task_sg_chain); 4067EXPORT_SYMBOL(transport_do_task_sg_chain);
@@ -4266,25 +4123,25 @@ static int transport_allocate_data_tasks(
4266 * It's so much easier and only a waste when task_count > 1. 4123 * It's so much easier and only a waste when task_count > 1.
4267 * That is extremely rare. 4124 * That is extremely rare.
4268 */ 4125 */
4269 task->task_sg_num = sgl_nents; 4126 task->task_sg_nents = sgl_nents;
4270 if (cmd->se_tfo->task_sg_chaining) { 4127 if (cmd->se_tfo->task_sg_chaining) {
4271 task->task_sg_num++; 4128 task->task_sg_nents++;
4272 task->task_padded_sg = 1; 4129 task->task_padded_sg = 1;
4273 } 4130 }
4274 4131
4275 task->task_sg = kmalloc(sizeof(struct scatterlist) * \ 4132 task->task_sg = kmalloc(sizeof(struct scatterlist) * \
4276 task->task_sg_num, GFP_KERNEL); 4133 task->task_sg_nents, GFP_KERNEL);
4277 if (!task->task_sg) { 4134 if (!task->task_sg) {
4278 cmd->se_dev->transport->free_task(task); 4135 cmd->se_dev->transport->free_task(task);
4279 return -ENOMEM; 4136 return -ENOMEM;
4280 } 4137 }
4281 4138
4282 sg_init_table(task->task_sg, task->task_sg_num); 4139 sg_init_table(task->task_sg, task->task_sg_nents);
4283 4140
4284 task_size = task->task_size; 4141 task_size = task->task_size;
4285 4142
4286 /* Build new sgl, only up to task_size */ 4143 /* Build new sgl, only up to task_size */
4287 for_each_sg(task->task_sg, sg, task->task_sg_num, count) { 4144 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
4288 if (cmd_sg->length > task_size) 4145 if (cmd_sg->length > task_size)
4289 break; 4146 break;
4290 4147
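The task_padded_sg and task_sg_nents++ logic above reserves one extra scatterlist slot per task when the fabric enables task_sg_chaining: sg_chain() stores a link to the next task's table in that trailing entry, which is how transport_do_task_sg_chain() earlier in this diff stitches the per-task tables into one list walkable by for_each_sg(). A hedged kernel-style sketch; table sizes are illustrative:

    /* Sketch: chain table 'a' into table 'b' so consumers walk both as
     * one list. a_nents counts the data entries plus one padding slot. */
    static void chain_two_tables(struct scatterlist *a, int a_nents,
                                 struct scatterlist *b, int b_nents)
    {
        sg_init_table(a, a_nents);
        sg_init_table(b, b_nents);

        /* The last slot of 'a' now links to 'b' instead of holding data. */
        sg_chain(a, a_nents, b);
    }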
@@ -4311,6 +4168,7 @@ transport_allocate_control_task(struct se_cmd *cmd)
4311 unsigned char *cdb; 4168 unsigned char *cdb;
4312 struct se_task *task; 4169 struct se_task *task;
4313 unsigned long flags; 4170 unsigned long flags;
4171 int ret = 0;
4314 4172
4315 task = transport_generic_get_task(cmd, cmd->data_direction); 4173 task = transport_generic_get_task(cmd, cmd->data_direction);
4316 if (!task) 4174 if (!task)
@@ -4331,7 +4189,7 @@ transport_allocate_control_task(struct se_cmd *cmd)
4331 memcpy(task->task_sg, cmd->t_data_sg, 4189 memcpy(task->task_sg, cmd->t_data_sg,
4332 sizeof(struct scatterlist) * cmd->t_data_nents); 4190 sizeof(struct scatterlist) * cmd->t_data_nents);
4333 task->task_size = cmd->data_length; 4191 task->task_size = cmd->data_length;
4334 task->task_sg_num = cmd->t_data_nents; 4192 task->task_sg_nents = cmd->t_data_nents;
4335 4193
4336 spin_lock_irqsave(&cmd->t_state_lock, flags); 4194 spin_lock_irqsave(&cmd->t_state_lock, flags);
4337 list_add_tail(&task->t_list, &cmd->t_task_list); 4195 list_add_tail(&task->t_list, &cmd->t_task_list);
@@ -4339,16 +4197,19 @@ transport_allocate_control_task(struct se_cmd *cmd)
4339 4197
4340 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { 4198 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
4341 if (dev->transport->map_task_SG) 4199 if (dev->transport->map_task_SG)
4342 return dev->transport->map_task_SG(task); 4200 ret = dev->transport->map_task_SG(task);
4343 return 0;
4344 } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { 4201 } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
4345 if (dev->transport->cdb_none) 4202 if (dev->transport->cdb_none)
4346 return dev->transport->cdb_none(task); 4203 ret = dev->transport->cdb_none(task);
4347 return 0;
4348 } else { 4204 } else {
4205 pr_err("target: Unknown control cmd type!\n");
4349 BUG(); 4206 BUG();
4350 return -ENOMEM;
4351 } 4207 }
4208
4209 /* Success! Return number of tasks allocated */
4210 if (ret == 0)
4211 return 1;
4212 return ret;
4352} 4213}
4353 4214
4354static u32 transport_allocate_tasks( 4215static u32 transport_allocate_tasks(
@@ -4358,18 +4219,12 @@ static u32 transport_allocate_tasks(
4358 struct scatterlist *sgl, 4219 struct scatterlist *sgl,
4359 unsigned int sgl_nents) 4220 unsigned int sgl_nents)
4360{ 4221{
4361 int ret; 4222 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
4362
4363 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4364 return transport_allocate_data_tasks(cmd, lba, data_direction, 4223 return transport_allocate_data_tasks(cmd, lba, data_direction,
4365 sgl, sgl_nents); 4224 sgl, sgl_nents);
4366 } else { 4225 else
4367 ret = transport_allocate_control_task(cmd); 4226 return transport_allocate_control_task(cmd);
4368 if (ret < 0) 4227
4369 return ret;
4370 else
4371 return 1;
4372 }
4373} 4228}
4374 4229
4375 4230
@@ -4441,64 +4296,6 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
4441 */ 4296 */
4442void transport_generic_process_write(struct se_cmd *cmd) 4297void transport_generic_process_write(struct se_cmd *cmd)
4443{ 4298{
4444#if 0
4445 /*
4446 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
4447 * original EDTL
4448 */
4449 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
4450 if (!cmd->t_tasks_se_num) {
4451 unsigned char *dst, *buf =
4452 (unsigned char *)cmd->t_task_buf;
4453
4454 dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
4455 if (!(dst)) {
4456 printk(KERN_ERR "Unable to allocate memory for"
4457 " WRITE underflow\n");
4458 transport_generic_request_failure(cmd, NULL,
4459 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
4460 return;
4461 }
4462 memcpy(dst, buf, cmd->cmd_spdtl);
4463
4464 kfree(cmd->t_task_buf);
4465 cmd->t_task_buf = dst;
4466 } else {
4467 struct scatterlist *sg =
4468 (struct scatterlist *sg)cmd->t_task_buf;
4469 struct scatterlist *orig_sg;
4470
4471 orig_sg = kzalloc(sizeof(struct scatterlist) *
4472 cmd->t_tasks_se_num,
4473 GFP_KERNEL))) {
4474 if (!(orig_sg)) {
4475 printk(KERN_ERR "Unable to allocate memory"
4476 " for WRITE underflow\n");
4477 transport_generic_request_failure(cmd, NULL,
4478 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
4479 return;
4480 }
4481
4482 memcpy(orig_sg, cmd->t_task_buf,
4483 sizeof(struct scatterlist) *
4484 cmd->t_tasks_se_num);
4485
4486 cmd->data_length = cmd->cmd_spdtl;
4487 /*
4488 * FIXME, clear out original struct se_task and state
4489 * information.
4490 */
4491 if (transport_generic_new_cmd(cmd) < 0) {
4492 transport_generic_request_failure(cmd, NULL,
4493 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
4494 kfree(orig_sg);
4495 return;
4496 }
4497
4498 transport_memcpy_write_sg(cmd, orig_sg);
4499 }
4500 }
4501#endif
4502 transport_execute_tasks(cmd); 4299 transport_execute_tasks(cmd);
4503} 4300}
4504EXPORT_SYMBOL(transport_generic_process_write); 4301EXPORT_SYMBOL(transport_generic_process_write);
@@ -4554,7 +4351,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
4554 return PYX_TRANSPORT_WRITE_PENDING; 4351 return PYX_TRANSPORT_WRITE_PENDING;
4555 4352
4556queue_full: 4353queue_full:
4557 printk(KERN_INFO "Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); 4354 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
4558 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 4355 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
4559 transport_handle_queue_full(cmd, cmd->se_dev, 4356 transport_handle_queue_full(cmd, cmd->se_dev,
4560 transport_write_pending_qf); 4357 transport_write_pending_qf);
@@ -4586,7 +4383,7 @@ void transport_generic_free_cmd(
4586 4383
4587 if (cmd->se_lun) { 4384 if (cmd->se_lun) {
4588#if 0 4385#if 0
4589 printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" 4386 pr_debug("cmd: %p ITT: 0x%08x contains"
4590 " cmd->se_lun\n", cmd, 4387 " cmd->se_lun\n", cmd,
4591 cmd->se_tfo->get_task_tag(cmd)); 4388 cmd->se_tfo->get_task_tag(cmd));
4592#endif 4389#endif
@@ -4627,7 +4424,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (atomic_read(&cmd->t_transport_stop)) {
 		atomic_set(&cmd->transport_lun_stop, 0);
-		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
+		pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
 			" TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		transport_cmd_check_stop(cmd, 1, 0);
@@ -4640,13 +4437,13 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 
 	ret = transport_stop_tasks_for_cmd(cmd);
 
-	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
-		" %d\n", cmd, cmd->t_task_cdbs, ret);
+	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
+		" %d\n", cmd, cmd->t_task_list_num, ret);
 	if (!ret) {
-		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
 			cmd->se_tfo->get_task_tag(cmd));
 		wait_for_completion(&cmd->transport_lun_stop_comp);
-		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
 			cmd->se_tfo->get_task_tag(cmd));
 	}
 	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
@@ -4654,13 +4451,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	return 0;
 }
 
-/* #define DEBUG_CLEAR_LUN */
-#ifdef DEBUG_CLEAR_LUN
-#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CLEAR_L(x...)
-#endif
-
 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 {
 	struct se_cmd *cmd = NULL;
@@ -4682,7 +4472,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 		 * progress for the iscsi_cmd_t.
 		 */
 		spin_lock(&cmd->t_state_lock);
-		DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport"
+		pr_debug("SE_LUN[%d] - Setting cmd->transport"
 			"_lun_stop for ITT: 0x%08x\n",
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
@@ -4691,8 +4481,8 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 
 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
 
-		if (!(cmd->se_lun)) {
-			printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
+		if (!cmd->se_lun) {
+			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
 				cmd->se_tfo->get_task_tag(cmd),
 				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
 			BUG();
@@ -4701,7 +4491,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 		 * If the Storage engine still owns the iscsi_cmd_t, determine
 		 * and/or stop its context.
 		 */
-		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
+		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
 			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
 
@@ -4710,13 +4500,13 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 			continue;
 		}
 
-		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
 			"_wait_for_tasks(): SUCCESS\n",
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
 
 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-		if (!(atomic_read(&cmd->transport_dev_active))) {
+		if (!atomic_read(&cmd->transport_dev_active)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 			goto check_cond;
 		}
@@ -4741,7 +4531,7 @@ check_cond:
 		 */
 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
 		if (atomic_read(&cmd->transport_lun_fe_stop)) {
-			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+			pr_debug("SE_LUN[%d] - Detected FE stop for"
 				" struct se_cmd: %p ITT: 0x%08x\n",
 				lun->unpacked_lun,
 				cmd, cmd->se_tfo->get_task_tag(cmd));
@@ -4753,7 +4543,7 @@ check_cond:
 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 			continue;
 		}
-		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
 			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
 
 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
@@ -4779,7 +4569,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
 	kt = kthread_run(transport_clear_lun_thread, lun,
 			"tcm_cl_%u", lun->unpacked_lun);
 	if (IS_ERR(kt)) {
-		printk(KERN_ERR "Unable to start clear_lun thread\n");
+		pr_err("Unable to start clear_lun thread\n");
 		return PTR_ERR(kt);
 	}
 	wait_for_completion(&lun->lun_shutdown_comp);
@@ -4812,7 +4602,7 @@ static void transport_generic_wait_for_tasks(
 	 */
 	if (atomic_read(&cmd->transport_lun_stop)) {
 
-		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
+		pr_debug("wait_for_tasks: Stopping"
 			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
 			"_stop_comp); for ITT: 0x%08x\n",
 			cmd->se_tfo->get_task_tag(cmd));
@@ -4834,7 +4624,7 @@ static void transport_generic_wait_for_tasks(
 	 * struct se_cmd, now owns the structure and can be released through
 	 * normal means below.
 	 */
-	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
+	pr_debug("wait_for_tasks: Stopped"
 		" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
 		"stop_comp); for ITT: 0x%08x\n",
 		cmd->se_tfo->get_task_tag(cmd));
@@ -4847,7 +4637,7 @@ static void transport_generic_wait_for_tasks(
 
 	atomic_set(&cmd->t_transport_stop, 1);
 
-	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
 		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
 		" = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
@@ -4863,7 +4653,7 @@ static void transport_generic_wait_for_tasks(
 	atomic_set(&cmd->t_transport_active, 0);
 	atomic_set(&cmd->t_transport_stop, 0);
 
-	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
+	pr_debug("wait_for_tasks: Stopped wait_for_compltion("
 		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
 		cmd->se_tfo->get_task_tag(cmd));
 remove:
@@ -5071,11 +4861,11 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	int ret = 0;
 
 	if (atomic_read(&cmd->t_transport_aborted) != 0) {
-		if (!(send_status) ||
+		if (!send_status ||
 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
 			return 1;
 #if 0
-		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
 			" status for CDB: 0x%02x ITT: 0x%08x\n",
 			cmd->t_task_cdb[0],
 			cmd->se_tfo->get_task_tag(cmd));
@@ -5107,7 +4897,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	}
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 #if 0
-	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
 		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
 		cmd->se_tfo->get_task_tag(cmd));
 #endif
@@ -5145,7 +4935,7 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
 	default:
-		printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
+		pr_err("Uknown TMR function: 0x%02x.\n",
 			tmr->function);
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
@@ -5190,7 +4980,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
 	while ((task = transport_get_task_from_state_list(dev))) {
 		if (!task->task_se_cmd) {
-			printk(KERN_ERR "task->task_se_cmd is NULL!\n");
+			pr_err("task->task_se_cmd is NULL!\n");
 			continue;
 		}
 		cmd = task->task_se_cmd;
@@ -5199,18 +4989,18 @@ static void transport_processing_shutdown(struct se_device *dev)
 
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 
-		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
-			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
+		pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
+			" i_state: %d, t_state/def_t_state:"
 			" %d/%d cdb: 0x%02x\n", cmd, task,
-			cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn,
-			cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state,
+			cmd->se_tfo->get_task_tag(cmd),
+			cmd->se_tfo->get_cmd_state(cmd),
 			cmd->t_state, cmd->deferred_t_state,
 			cmd->t_task_cdb[0]);
-		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+		pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
 			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
-			cmd->t_task_cdbs,
+			cmd->t_task_list_num,
 			atomic_read(&cmd->t_task_cdbs_left),
 			atomic_read(&cmd->t_task_cdbs_sent),
 			atomic_read(&cmd->t_transport_active),
@@ -5222,10 +5012,10 @@ static void transport_processing_shutdown(struct se_device *dev)
 			spin_unlock_irqrestore(
 				&cmd->t_state_lock, flags);
 
-			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+			pr_debug("Waiting for task: %p to shutdown for dev:"
 				" %p\n", task, dev);
 			wait_for_completion(&task->task_stop_comp);
-			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+			pr_debug("Completed task: %p shutdown for dev: %p\n",
 				task, dev);
 
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -5239,11 +5029,11 @@ static void transport_processing_shutdown(struct se_device *dev)
 		}
 		__transport_stop_task_timer(task, &flags);
 
-		if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
+		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
 			spin_unlock_irqrestore(
 				&cmd->t_state_lock, flags);
 
-			DEBUG_DO("Skipping task: %p, dev: %p for"
+			pr_debug("Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
 				atomic_read(&cmd->t_task_cdbs_ex_left));
 
@@ -5252,7 +5042,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 		}
 
 		if (atomic_read(&cmd->t_transport_active)) {
-			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+			pr_debug("got t_transport_active = 1 for task: %p, dev:"
 				" %p\n", task, dev);
 
 			if (atomic_read(&cmd->t_fe_count)) {
@@ -5282,7 +5072,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
-		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+		pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
 			task, dev);
 
 		if (atomic_read(&cmd->t_fe_count)) {
@@ -5315,7 +5105,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 	 */
 	while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
 
-		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
+		pr_debug("From Device Queue: cmd: %p t_state: %d\n",
 			cmd, cmd->t_state);
 
 		if (atomic_read(&cmd->t_fe_count)) {
@@ -5368,8 +5158,8 @@ get_cmd:
 
 	switch (cmd->t_state) {
 	case TRANSPORT_NEW_CMD_MAP:
-		if (!(cmd->se_tfo->new_cmd_map)) {
-			printk(KERN_ERR "cmd->se_tfo->new_cmd_map is"
+		if (!cmd->se_tfo->new_cmd_map) {
+			pr_err("cmd->se_tfo->new_cmd_map is"
 				" NULL for TRANSPORT_NEW_CMD_MAP\n");
 			BUG();
 		}
@@ -5420,7 +5210,7 @@ get_cmd:
 		transport_generic_write_pending(cmd);
 		break;
 	default:
-		printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+		pr_err("Unknown t_state: %d deferred_t_state:"
 			" %d for ITT: 0x%08x i_state: %d on SE LUN:"
 			" %u\n", cmd->t_state, cmd->deferred_t_state,
 			cmd->se_tfo->get_task_tag(cmd),
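
The target_core_transport.c hunks above finish converting the driver's hand-rolled DEBUG_TRANSPORT_S/DEBUG_CLEAR_L/DEBUG_DO macros to plain pr_debug()/pr_err(). A minimal sketch of the resulting idiom follows (the module name "pr_demo" is hypothetical, not part of this commit): with CONFIG_DYNAMIC_DEBUG each pr_debug() callsite can be toggled at runtime, and without it pr_debug() compiles away entirely unless DEBUG is defined, so no per-message #ifdef scaffolding is needed.

/* Sketch only: the pr_debug()/pr_err() idiom this series converts to. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* prefix every message */

#include <linux/module.h>
#include <linux/printk.h>

static int __init pr_demo_init(void)
{
	pr_debug("visible only when this callsite is enabled\n");	/* opt-in */
	pr_err("errors always reach the log\n");			/* always on */
	return 0;
}

static void __exit pr_demo_exit(void)
{
	pr_debug("unloading\n");
}

module_init(pr_demo_init);
module_exit(pr_demo_exit);
MODULE_LICENSE("GPL");
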
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index d28e9c4a1c99..31e3c652527e 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -49,15 +49,15 @@ int core_scsi3_ua_check(
 	struct se_session *sess = cmd->se_sess;
 	struct se_node_acl *nacl;
 
-	if (!(sess))
+	if (!sess)
 		return 0;
 
 	nacl = sess->se_node_acl;
-	if (!(nacl))
+	if (!nacl)
 		return 0;
 
 	deve = &nacl->device_list[cmd->orig_fe_lun];
-	if (!(atomic_read(&deve->ua_count)))
+	if (!atomic_read(&deve->ua_count))
 		return 0;
 	/*
 	 * From sam4r14, section 5.14 Unit attention condition:
@@ -97,12 +97,12 @@ int core_scsi3_ua_allocate(
 	/*
 	 * PASSTHROUGH OPS
 	 */
-	if (!(nacl))
+	if (!nacl)
 		return -EINVAL;
 
 	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
-	if (!(ua)) {
-		printk(KERN_ERR "Unable to allocate struct se_ua\n");
+	if (!ua) {
+		pr_err("Unable to allocate struct se_ua\n");
 		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&ua->ua_dev_list);
@@ -177,7 +177,7 @@ int core_scsi3_ua_allocate(
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
 
-	printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+	pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
 		" 0x%02x, ASCQ: 0x%02x\n",
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 		asc, ascq);
@@ -215,16 +215,16 @@ void core_scsi3_ua_for_check_condition(
 	struct se_ua *ua = NULL, *ua_p;
 	int head = 1;
 
-	if (!(sess))
+	if (!sess)
 		return;
 
 	nacl = sess->se_node_acl;
-	if (!(nacl))
+	if (!nacl)
 		return;
 
 	spin_lock_irq(&nacl->device_list_lock);
 	deve = &nacl->device_list[cmd->orig_fe_lun];
-	if (!(atomic_read(&deve->ua_count))) {
+	if (!atomic_read(&deve->ua_count)) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return;
 	}
@@ -264,7 +264,7 @@ void core_scsi3_ua_for_check_condition(
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
 
-	printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+	pr_debug("[%s]: %s UNIT ATTENTION condition with"
 		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
 		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
@@ -284,16 +284,16 @@ int core_scsi3_ua_clear_for_request_sense(
 	struct se_ua *ua = NULL, *ua_p;
 	int head = 1;
 
-	if (!(sess))
+	if (!sess)
 		return -EINVAL;
 
 	nacl = sess->se_node_acl;
-	if (!(nacl))
+	if (!nacl)
 		return -EINVAL;
 
 	spin_lock_irq(&nacl->device_list_lock);
 	deve = &nacl->device_list[cmd->orig_fe_lun];
-	if (!(atomic_read(&deve->ua_count))) {
+	if (!atomic_read(&deve->ua_count)) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -EPERM;
 	}
@@ -323,7 +323,7 @@ int core_scsi3_ua_clear_for_request_sense(
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
 
-	printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+	pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
 		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
 		" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
 		cmd->orig_fe_lun, *asc, *ascq);
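
Besides the printk conversion, the target_core_ua.c hunks apply the "remove extra parentheses" cleanup squashed into this commit: the '!' operator already binds to its whole operand, so "if (!(sess))" and "if (!sess)" are identical in meaning. A hypothetical before/after pair (the struct and helper names are invented for illustration, not target code):

/* Illustration only; 'session' and the helpers are invented names. */
#include <linux/types.h>

struct session { int id; };

static bool session_valid_old(struct session *sess)
{
	if (!(sess))		/* old style: redundant parentheses */
		return false;
	return true;
}

static bool session_valid_new(struct session *sess)
{
	if (!sess)		/* new style: same semantics, checkpatch-clean */
		return false;
	return true;
}
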
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 8d26779e440c..f7fff7ed63c3 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -23,30 +23,6 @@
 #define FT_TPG_NAMELEN		32	/* max length of TPG name */
 #define FT_LUN_NAMELEN		32	/* max length of LUN name */
 
-/*
- * Debug options.
- */
-#define FT_DEBUG_CONF	0x01	/* configuration messages */
-#define FT_DEBUG_SESS	0x02	/* session messages */
-#define FT_DEBUG_TM	0x04	/* TM operations */
-#define FT_DEBUG_IO	0x08	/* I/O commands */
-#define FT_DEBUG_DATA	0x10	/* Data transfer */
-
-extern unsigned int ft_debug_logging;	/* debug options */
-
-#define FT_DEBUG(mask, fmt, args...)					\
-	do {								\
-		if (ft_debug_logging & (mask))				\
-			printk(KERN_INFO "tcm_fc: %s: " fmt,		\
-				__func__, ##args);			\
-	} while (0)
-
-#define FT_CONF_DBG(fmt, args...)	FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
-#define FT_SESS_DBG(fmt, args...)	FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
-#define FT_TM_DBG(fmt, args...)		FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
-#define FT_IO_DBG(fmt, args...)		FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
-#define FT_DATA_DBG(fmt, args...)	FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
-
 struct ft_transport_id {
 	__u8	format;
 	__u8	__resvd1[7];
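
The block removed here was tcm_fc's hand-rolled logging: a module-wide ft_debug_logging bitmask tested on every call. The pr_debug() replacements give finer control with no mask plumbing, because under CONFIG_DYNAMIC_DEBUG every callsite is registered with dynamic debug and can be enabled per module, per file, per function, or per line. A sketch of what replaces the five FT_*_DBG masks (the helper name ft_sess_trace is invented; the control-file paths are the standard dynamic debug ones, assuming debugfs is mounted):

/*
 * Runtime control replacing the FT_DEBUG_* mask (sketch; requires
 * CONFIG_DYNAMIC_DEBUG and a mounted debugfs):
 *
 *   # all tcm_fc debug messages, with function names included (+pf):
 *   echo 'module tcm_fc +pf' > /sys/kernel/debug/dynamic_debug/control
 *
 *   # only the session-lookup messages FT_SESS_DBG used to cover:
 *   echo 'func ft_sess_get +p' > /sys/kernel/debug/dynamic_debug/control
 *
 *   # switch everything back off:
 *   echo 'module tcm_fc -p' > /sys/kernel/debug/dynamic_debug/control
 */
#include <linux/printk.h>
#include <linux/types.h>

static inline void ft_sess_trace(u32 port_id)
{
	/* was: FT_SESS_DBG("port_id %x\n", port_id); */
	pr_debug("port_id %x\n", port_id);	/* one dynamic-debug callsite */
}
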
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 9365e53947ad..a9e9a31da11d 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -62,22 +62,19 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 	struct scatterlist *sg;
 	int count;
 
-	if (!(ft_debug_logging & FT_DEBUG_IO))
-		return;
-
 	se_cmd = &cmd->se_cmd;
-	printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
+	pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
 		caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
-	printk(KERN_INFO "%s: cmd %p cdb %p\n",
+	pr_debug("%s: cmd %p cdb %p\n",
 		caller, cmd, cmd->cdb);
-	printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
+	pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
 
-	printk(KERN_INFO "%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
+	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
 		caller, cmd, se_cmd->t_data_nents,
 		se_cmd->data_length, se_cmd->se_cmd_flags);
 
 	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
-		printk(KERN_INFO "%s: cmd %p sg %p page %p "
+		pr_debug("%s: cmd %p sg %p page %p "
			"len 0x%x off 0x%x\n",
 			caller, cmd, sg,
 			sg_page(sg), sg->length, sg->offset);
@@ -85,7 +82,7 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 	sp = cmd->seq;
 	if (sp) {
 		ep = fc_seq_exch(sp);
-		printk(KERN_INFO "%s: cmd %p sid %x did %x "
+		pr_debug("%s: cmd %p sid %x did %x "
			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
 			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
 			sp->id, ep->esb_stat);
@@ -321,7 +318,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
 	case FC_RCTL_DD_SOL_CTL:	/* transfer ready */
 	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
 	default:
-		printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+		pr_debug("%s: unhandled frame r_ctl %x\n",
 			__func__, fh->fh_r_ctl);
 		fc_frame_free(fp);
 		transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
@@ -346,7 +343,7 @@ static void ft_send_resp_status(struct fc_lport *lport,
 	struct fcp_resp_rsp_info *info;
 
 	fh = fc_frame_header_get(rx_fp);
-	FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
+	pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
		  ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
 	len = sizeof(*fcp);
 	if (status == SAM_STAT_GOOD)
@@ -416,15 +413,15 @@ static void ft_send_tm(struct ft_cmd *cmd)
 		 * FCP4r01 indicates having a combination of
 		 * tm_flags set is invalid.
 		 */
-		FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
+		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
 		ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
 		return;
 	}
 
-	FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
+	pr_debug("alloc tm cmd fn %d\n", tm_func);
 	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
 	if (!tmr) {
-		FT_TM_DBG("alloc failed\n");
+		pr_debug("alloc failed\n");
 		ft_send_resp_code(cmd, FCP_TMF_FAILED);
 		return;
 	}
@@ -439,7 +436,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
		 * since "unable to handle TMR request because failed
		 * to get to LUN"
		 */
-		FT_TM_DBG("Failed to get LUN for TMR func %d, "
+		pr_debug("Failed to get LUN for TMR func %d, "
			"se_cmd %p, unpacked_lun %d\n",
			tm_func, &cmd->se_cmd, cmd->lun);
 		ft_dump_cmd(cmd, __func__);
@@ -490,7 +487,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
 		code = FCP_TMF_FAILED;
 		break;
 	}
-	FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
+	pr_debug("tmr fn %d resp %d fcp code %d\n",
		  tmr->function, tmr->response, code);
 	ft_send_resp_code(cmd, code);
 	return 0;
@@ -518,7 +515,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
 	return;
 
 busy:
-	FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
+	pr_debug("cmd or seq allocation failure - sending BUSY\n");
 	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
 	fc_frame_free(fp);
 	ft_sess_put(sess);		/* undo get from lookup */
@@ -543,7 +540,7 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
 	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
 	case FC_RCTL_ELS4_REQ:		/* SRR, perhaps */
 	default:
-		printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+		pr_debug("%s: unhandled frame r_ctl %x\n",
			__func__, fh->fh_r_ctl);
 		fc_frame_free(fp);
 		ft_sess_put(sess);	/* undo get from lookup */
@@ -642,7 +639,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)
 
 	ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
 
-	FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
+	pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
 	ft_dump_cmd(cmd, __func__);
 
 	if (ret == -ENOMEM) {
@@ -672,7 +669,7 @@ err:
 */
 static void ft_exec_req(struct ft_cmd *cmd)
 {
-	FT_IO_DBG("cmd state %x\n", cmd->state);
+	pr_debug("cmd state %x\n", cmd->state);
 	switch (cmd->state) {
 	case FC_CMD_ST_NEW:
 		ft_send_cmd(cmd);
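
Note the behavioural nuance in ft_dump_cmd() above: the early return on (ft_debug_logging & FT_DEBUG_IO) is gone, so the scatterlist walk now always executes and each pr_debug() inside it decides individually whether anything is printed. When pr_debug() is compiled out entirely (neither CONFIG_DYNAMIC_DEBUG nor DEBUG), the calls become no-ops but the loop itself still runs; a hypothetical guard for a heavyweight dump helper could restore the early-out, as in this sketch (dump_sg_list is an invented helper, not from this commit):

/* Sketch; dump_sg_list() is hypothetical, not target code. */
#include <linux/printk.h>
#include <linux/scatterlist.h>

static void dump_sg_list(struct scatterlist *sgl, int nents)
{
#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)		/* walk only in debug builds */
		pr_debug("sg %p page %p len 0x%x off 0x%x\n",
			 sg, sg_page(sg), sg->length, sg->offset);
#endif
}
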
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index ec9e40dc4514..d63e3dd3b180 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -106,7 +106,7 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
 	}
 	err = 4;
 fail:
-	FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
+	pr_debug("err %u len %zu pos %u byte %u\n",
		    err, cp - name, pos, byte);
 	return -1;
 }
@@ -216,7 +216,7 @@ static struct se_node_acl *ft_add_acl(
 	u64 wwpn;
 	u32 q_depth;
 
-	FT_CONF_DBG("add acl %s\n", name);
+	pr_debug("add acl %s\n", name);
 	tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
 
 	if (ft_parse_wwn(name, &wwpn, 1) < 0)
@@ -239,11 +239,11 @@ static void ft_del_acl(struct se_node_acl *se_acl)
 	struct ft_node_acl *acl = container_of(se_acl,
				struct ft_node_acl, se_node_acl);
 
-	FT_CONF_DBG("del acl %s\n",
+	pr_debug("del acl %s\n",
		config_item_name(&se_acl->acl_group.cg_item));
 
 	tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
-	FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
+	pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
		acl, se_acl, tpg, &tpg->se_tpg);
 
 	core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
@@ -260,11 +260,11 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
 	spin_lock_bh(&se_tpg->acl_node_lock);
 	list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
 		acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
-		FT_CONF_DBG("acl %p port_name %llx\n",
+		pr_debug("acl %p port_name %llx\n",
			acl, (unsigned long long)acl->node_auth.port_name);
 		if (acl->node_auth.port_name == rdata->ids.port_name ||
		    acl->node_auth.node_name == rdata->ids.node_name) {
-			FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
+			pr_debug("acl %p port_name %llx matched\n", acl,
				(unsigned long long)rdata->ids.port_name);
 			found = acl;
 			/* XXX need to hold onto ACL */
@@ -281,10 +281,10 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
 
 	acl = kzalloc(sizeof(*acl), GFP_KERNEL);
 	if (!acl) {
-		printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
+		pr_err("Unable to allocate struct ft_node_acl\n");
 		return NULL;
 	}
-	FT_CONF_DBG("acl %p\n", acl);
+	pr_debug("acl %p\n", acl);
 	return &acl->se_node_acl;
 }
 
@@ -294,7 +294,7 @@ static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
 	struct ft_node_acl *acl = container_of(se_acl,
				struct ft_node_acl, se_node_acl);
 
-	FT_CONF_DBG(KERN_INFO "acl %p\n", acl);
+	pr_debug("acl %p\n", acl);
 	kfree(acl);
 }
 
@@ -311,7 +311,7 @@ static struct se_portal_group *ft_add_tpg(
 	unsigned long index;
 	int ret;
 
-	FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
+	pr_debug("tcm_fc: add tpg %s\n", name);
 
 	/*
	 * Name must be "tpgt_" followed by the index.
@@ -354,7 +354,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
 
-	FT_CONF_DBG("del tpg %s\n",
+	pr_debug("del tpg %s\n",
		config_item_name(&tpg->se_tpg.tpg_group.cg_item));
 
 	kthread_stop(tpg->thread);
@@ -412,7 +412,7 @@ static struct se_wwn *ft_add_lport(
 	struct ft_lport_acl *old_lacl;
 	u64 wwpn;
 
-	FT_CONF_DBG("add lport %s\n", name);
+	pr_debug("add lport %s\n", name);
 	if (ft_parse_wwn(name, &wwpn, 1) < 0)
 		return NULL;
 	lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
@@ -441,7 +441,7 @@ static void ft_del_lport(struct se_wwn *wwn)
 	struct ft_lport_acl *lacl = container_of(wwn,
				struct ft_lport_acl, fc_lport_wwn);
 
-	FT_CONF_DBG("del lport %s\n",
+	pr_debug("del lport %s\n",
		config_item_name(&wwn->wwn_group.cg_item));
 	mutex_lock(&ft_lport_lock);
 	list_del(&lacl->list);
@@ -581,7 +581,7 @@ int ft_register_configfs(void)
 	 */
 	fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
 	if (IS_ERR(fabric)) {
-		printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
+		pr_err("%s: target_fabric_configfs_init() failed!\n",
			__func__);
 		return PTR_ERR(fabric);
 	}
@@ -608,11 +608,8 @@ int ft_register_configfs(void)
 	 */
 	ret = target_fabric_configfs_register(fabric);
 	if (ret < 0) {
-		FT_CONF_DBG("target_fabric_configfs_register() for"
+		pr_debug("target_fabric_configfs_register() for"
			    " FC Target failed!\n");
-		printk(KERN_INFO
-		       "%s: target_fabric_configfs_register() failed!\n",
-		       __func__);
 		target_fabric_configfs_free(fabric);
 		return -1;
 	}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 3563a9029c4a..11e6483fc127 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -39,6 +39,7 @@
 #include <linux/configfs.h>
 #include <linux/ctype.h>
 #include <linux/hash.h>
+#include <linux/ratelimit.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -176,8 +177,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 		error = lport->tt.seq_send(lport, cmd->seq, fp);
 		if (error) {
 			/* XXX For now, initiator will retry */
-			if (printk_ratelimit())
-				printk(KERN_ERR "%s: Failed to send frame %p, "
+			pr_err_ratelimited("%s: Failed to send frame %p, "
					"xid <0x%x>, remaining %zu, "
					"lso_max <0x%x>\n",
					__func__, fp, ep->xid,
@@ -222,7 +222,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	 */
 	buf = fc_frame_payload_get(fp, 1);
 	if (cmd->was_ddp_setup && buf) {
-		printk(KERN_INFO "%s: When DDP was setup, not expected to"
+		pr_debug("%s: When DDP was setup, not expected to"
			"receive frame with payload, Payload shall be"
			"copied directly to buffer instead of coming "
			"via. legacy receive queues\n", __func__);
@@ -260,7 +260,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
		 * this point, but just in case if required in future
		 * for debugging or any other purpose
		 */
-		printk(KERN_ERR "%s: Received frame with TSI bit not"
+		pr_err("%s: Received frame with TSI bit not"
			" being SET, dropping the frame, "
			"cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
			__func__, cmd->sg, cmd->sg_cnt);
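
The ft_queue_data_in() hunk above replaces the printk_ratelimit()-guarded printk with pr_err_ratelimited(), which is what the new linux/ratelimit.h include supports. The practical difference is state scope: printk_ratelimit() consults one global ratelimit state shared by every caller in the kernel, while pr_err_ratelimited() defines a static ratelimit state per callsite, so one noisy message can no longer consume the budget of unrelated ones. A minimal sketch of the new form (the function and parameter names are invented for illustration):

/* Sketch only; report_send_failure() is hypothetical, not target code. */
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/types.h>

static void report_send_failure(void *fp, u16 xid, size_t remaining)
{
	/* was: if (printk_ratelimit()) printk(KERN_ERR ...); */
	pr_err_ratelimited("Failed to send frame %p, xid <0x%x>, remaining %zu\n",
			   fp, xid, remaining);
}
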
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 7491e21cc6ae..fbcbb3d1d06b 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -198,13 +198,13 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
 		if (sess->port_id == port_id) {
 			kref_get(&sess->kref);
 			rcu_read_unlock();
-			FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
+			pr_debug("port_id %x found %p\n", port_id, sess);
 			return sess;
 		}
 	}
 out:
 	rcu_read_unlock();
-	FT_SESS_DBG("port_id %x not found\n", port_id);
+	pr_debug("port_id %x not found\n", port_id);
 	return NULL;
 }
 
@@ -240,7 +240,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 	hlist_add_head_rcu(&sess->hash, head);
 	tport->sess_count++;
 
-	FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
+	pr_debug("port_id %x sess %p\n", port_id, sess);
 
 	transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
				   sess->se_sess, sess);
@@ -314,7 +314,7 @@ int ft_sess_shutdown(struct se_session *se_sess)
 {
 	struct ft_sess *sess = se_sess->fabric_sess_ptr;
 
-	FT_SESS_DBG("port_id %x\n", sess->port_id);
+	pr_debug("port_id %x\n", sess->port_id);
 	return 1;
 }
 
@@ -335,7 +335,7 @@ void ft_sess_close(struct se_session *se_sess)
 		mutex_unlock(&ft_lport_lock);
 		return;
 	}
-	FT_SESS_DBG("port_id %x\n", port_id);
+	pr_debug("port_id %x\n", port_id);
 	ft_sess_unhash(sess);
 	mutex_unlock(&ft_lport_lock);
 	transport_deregister_session_configfs(se_sess);
@@ -348,7 +348,7 @@ void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
 {
 	struct ft_sess *sess = se_sess->fabric_sess_ptr;
 
-	FT_SESS_DBG("port_id %x\n", sess->port_id);
+	pr_debug("port_id %x\n", sess->port_id);
 }
 
 int ft_sess_logged_in(struct se_session *se_sess)
@@ -458,7 +458,7 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
 	mutex_lock(&ft_lport_lock);
 	ret = ft_prli_locked(rdata, spp_len, rspp, spp);
 	mutex_unlock(&ft_lport_lock);
-	FT_SESS_DBG("port_id %x flags %x ret %x\n",
+	pr_debug("port_id %x flags %x ret %x\n",
		    rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
 	return ret;
 }
@@ -518,11 +518,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
 	struct ft_sess *sess;
 	u32 sid = fc_frame_sid(fp);
 
-	FT_SESS_DBG("sid %x\n", sid);
+	pr_debug("sid %x\n", sid);
 
 	sess = ft_sess_get(lport, sid);
 	if (!sess) {
-		FT_SESS_DBG("sid %x sess lookup failed\n", sid);
+		pr_debug("sid %x sess lookup failed\n", sid);
 		/* TBD XXX - if FCP_CMND, send PRLO */
 		fc_frame_free(fp);
 		return;