aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/target/target_core_device.c
diff options
context:
space:
mode:
authorAndy Grover <agrover@redhat.com>2011-06-08 13:36:43 -0400
committerNicholas Bellinger <nab@linux-iscsi.org>2011-07-22 05:37:48 -0400
commit6708bb27bb2703da238f21f516034263348af5be (patch)
treea23e1f9eab22933d773d6b6ad6263d6751379a00 /drivers/target/target_core_device.c
parentec98f7825c6eaa4a9afb0eb518826efc8a2ed4a2 (diff)
target: Follow up core updates from AGrover and HCH (round 4)
This patch contains the squashed version of fourth round series cleanups from Andy and Christoph following the post heavy lifting in the preceding: 'Eliminate usage of struct se_mem' and 'Make all control CDBs scatter-gather' changes. This also includes a conversion of target core and the v3.0 mainline fabric modules (loopback and tcm_fc) to use pr_debug and the CONFIG_DYNAMIC_DEBUG infrastructure! These have been squashed into this fourth and final round for v3.1. target: Remove ifdeffed code in t_g_process_write target: Remove direct ramdisk code target: Rename task_sg_num to task_sg_nents target: Remove custom debug macros for pr_debug. Use pr_err(). target: Remove custom debug macros in mainline fabrics target: Set WSNZ=1 in block limits VPD. Abort if WRITE_SAME sectors = 0 target: Remove transport do_se_mem_map callback target: Further simplify transport_free_pages target: Redo task allocation return value handling target: Remove extra parentheses target: change alloc_task call to take *cdb, not *cmd (nab: Fix bogus struct file assignments in fd_do_readv and fd_do_writev) Signed-off-by: Andy Grover <agrover@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target/target_core_device.c')
-rw-r--r--drivers/target/target_core_device.c214
1 file changed, 107 insertions, 107 deletions
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 1185c3b76d47..81860ddc7cc4 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -84,7 +84,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
84 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { 84 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
85 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; 85 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
86 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 86 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
87 printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" 87 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
88 " Access for 0x%08x\n", 88 " Access for 0x%08x\n",
89 se_cmd->se_tfo->get_fabric_name(), 89 se_cmd->se_tfo->get_fabric_name(),
90 unpacked_lun); 90 unpacked_lun);
@@ -117,7 +117,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
117 if (unpacked_lun != 0) { 117 if (unpacked_lun != 0) {
118 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 118 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
119 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 119 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
120 printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 120 pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
121 " Access for 0x%08x\n", 121 " Access for 0x%08x\n",
122 se_cmd->se_tfo->get_fabric_name(), 122 se_cmd->se_tfo->get_fabric_name(),
123 unpacked_lun); 123 unpacked_lun);
@@ -204,7 +204,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
204 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 204 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
205 205
206 if (!se_lun) { 206 if (!se_lun) {
207 printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 207 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
208 " Access for 0x%08x\n", 208 " Access for 0x%08x\n",
209 se_cmd->se_tfo->get_fabric_name(), 209 se_cmd->se_tfo->get_fabric_name(),
210 unpacked_lun); 210 unpacked_lun);
@@ -255,15 +255,15 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
255 continue; 255 continue;
256 256
257 lun = deve->se_lun; 257 lun = deve->se_lun;
258 if (!(lun)) { 258 if (!lun) {
259 printk(KERN_ERR "%s device entries device pointer is" 259 pr_err("%s device entries device pointer is"
260 " NULL, but Initiator has access.\n", 260 " NULL, but Initiator has access.\n",
261 tpg->se_tpg_tfo->get_fabric_name()); 261 tpg->se_tpg_tfo->get_fabric_name());
262 continue; 262 continue;
263 } 263 }
264 port = lun->lun_sep; 264 port = lun->lun_sep;
265 if (!(port)) { 265 if (!port) {
266 printk(KERN_ERR "%s device entries device pointer is" 266 pr_err("%s device entries device pointer is"
267 " NULL, but Initiator has access.\n", 267 " NULL, but Initiator has access.\n",
268 tpg->se_tpg_tfo->get_fabric_name()); 268 tpg->se_tpg_tfo->get_fabric_name());
269 continue; 269 continue;
@@ -301,7 +301,7 @@ int core_free_device_list_for_node(
301 continue; 301 continue;
302 302
303 if (!deve->se_lun) { 303 if (!deve->se_lun) {
304 printk(KERN_ERR "%s device entries device pointer is" 304 pr_err("%s device entries device pointer is"
305 " NULL, but Initiator has access.\n", 305 " NULL, but Initiator has access.\n",
306 tpg->se_tpg_tfo->get_fabric_name()); 306 tpg->se_tpg_tfo->get_fabric_name());
307 continue; 307 continue;
@@ -372,7 +372,7 @@ int core_update_device_list_for_node(
372 * struct se_dev_entry pointers below as logic in 372 * struct se_dev_entry pointers below as logic in
373 * core_alua_do_transition_tg_pt() depends on these being present. 373 * core_alua_do_transition_tg_pt() depends on these being present.
374 */ 374 */
375 if (!(enable)) { 375 if (!enable) {
376 /* 376 /*
377 * deve->se_lun_acl will be NULL for demo-mode created LUNs 377 * deve->se_lun_acl will be NULL for demo-mode created LUNs
378 * that have not been explicitly concerted to MappedLUNs -> 378 * that have not been explicitly concerted to MappedLUNs ->
@@ -395,14 +395,14 @@ int core_update_device_list_for_node(
395 */ 395 */
396 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 396 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
397 if (deve->se_lun_acl != NULL) { 397 if (deve->se_lun_acl != NULL) {
398 printk(KERN_ERR "struct se_dev_entry->se_lun_acl" 398 pr_err("struct se_dev_entry->se_lun_acl"
399 " already set for demo mode -> explict" 399 " already set for demo mode -> explict"
400 " LUN ACL transition\n"); 400 " LUN ACL transition\n");
401 spin_unlock_irq(&nacl->device_list_lock); 401 spin_unlock_irq(&nacl->device_list_lock);
402 return -EINVAL; 402 return -EINVAL;
403 } 403 }
404 if (deve->se_lun != lun) { 404 if (deve->se_lun != lun) {
405 printk(KERN_ERR "struct se_dev_entry->se_lun does" 405 pr_err("struct se_dev_entry->se_lun does"
406 " match passed struct se_lun for demo mode" 406 " match passed struct se_lun for demo mode"
407 " -> explict LUN ACL transition\n"); 407 " -> explict LUN ACL transition\n");
408 spin_unlock_irq(&nacl->device_list_lock); 408 spin_unlock_irq(&nacl->device_list_lock);
@@ -501,8 +501,8 @@ static struct se_port *core_alloc_port(struct se_device *dev)
501 struct se_port *port, *port_tmp; 501 struct se_port *port, *port_tmp;
502 502
503 port = kzalloc(sizeof(struct se_port), GFP_KERNEL); 503 port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
504 if (!(port)) { 504 if (!port) {
505 printk(KERN_ERR "Unable to allocate struct se_port\n"); 505 pr_err("Unable to allocate struct se_port\n");
506 return ERR_PTR(-ENOMEM); 506 return ERR_PTR(-ENOMEM);
507 } 507 }
508 INIT_LIST_HEAD(&port->sep_alua_list); 508 INIT_LIST_HEAD(&port->sep_alua_list);
@@ -513,7 +513,7 @@ static struct se_port *core_alloc_port(struct se_device *dev)
513 513
514 spin_lock(&dev->se_port_lock); 514 spin_lock(&dev->se_port_lock);
515 if (dev->dev_port_count == 0x0000ffff) { 515 if (dev->dev_port_count == 0x0000ffff) {
516 printk(KERN_WARNING "Reached dev->dev_port_count ==" 516 pr_warn("Reached dev->dev_port_count =="
517 " 0x0000ffff\n"); 517 " 0x0000ffff\n");
518 spin_unlock(&dev->se_port_lock); 518 spin_unlock(&dev->se_port_lock);
519 return ERR_PTR(-ENOSPC); 519 return ERR_PTR(-ENOSPC);
@@ -532,7 +532,7 @@ again:
532 * 3h to FFFFh Relative port 3 through 65 535 532 * 3h to FFFFh Relative port 3 through 65 535
533 */ 533 */
534 port->sep_rtpi = dev->dev_rpti_counter++; 534 port->sep_rtpi = dev->dev_rpti_counter++;
535 if (!(port->sep_rtpi)) 535 if (!port->sep_rtpi)
536 goto again; 536 goto again;
537 537
538 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { 538 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
@@ -570,7 +570,7 @@ static void core_export_port(
570 if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 570 if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
571 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 571 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
572 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { 572 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
573 printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" 573 pr_err("Unable to allocate t10_alua_tg_pt"
574 "_gp_member_t\n"); 574 "_gp_member_t\n");
575 return; 575 return;
576 } 576 }
@@ -578,7 +578,7 @@ static void core_export_port(
578 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 578 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
579 su_dev->t10_alua.default_tg_pt_gp); 579 su_dev->t10_alua.default_tg_pt_gp);
580 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 580 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
581 printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" 581 pr_debug("%s/%s: Adding to default ALUA Target Port"
582 " Group: alua/default_tg_pt_gp\n", 582 " Group: alua/default_tg_pt_gp\n",
583 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); 583 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
584 } 584 }
@@ -663,8 +663,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
663 list_for_each_entry(se_task, &se_cmd->t_task_list, t_list) 663 list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
664 break; 664 break;
665 665
666 if (!(se_task)) { 666 if (!se_task) {
667 printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); 667 pr_err("Unable to locate struct se_task for struct se_cmd\n");
668 return PYX_TRANSPORT_LU_COMM_FAILURE; 668 return PYX_TRANSPORT_LU_COMM_FAILURE;
669 } 669 }
670 670
@@ -675,7 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
675 * coming via a target_core_mod PASSTHROUGH op, and not through 675 * coming via a target_core_mod PASSTHROUGH op, and not through
676 * a $FABRIC_MOD. In that case, report LUN=0 only. 676 * a $FABRIC_MOD. In that case, report LUN=0 only.
677 */ 677 */
678 if (!(se_sess)) { 678 if (!se_sess) {
679 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); 679 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
680 lun_count = 1; 680 lun_count = 1;
681 goto done; 681 goto done;
@@ -893,12 +893,12 @@ void se_dev_set_default_attribs(
893int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) 893int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
894{ 894{
895 if (task_timeout > DA_TASK_TIMEOUT_MAX) { 895 if (task_timeout > DA_TASK_TIMEOUT_MAX) {
896 printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" 896 pr_err("dev[%p]: Passed task_timeout: %u larger then"
897 " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); 897 " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
898 return -EINVAL; 898 return -EINVAL;
899 } else { 899 } else {
900 dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; 900 dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
901 printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", 901 pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
902 dev, task_timeout); 902 dev, task_timeout);
903 } 903 }
904 904
@@ -910,7 +910,7 @@ int se_dev_set_max_unmap_lba_count(
910 u32 max_unmap_lba_count) 910 u32 max_unmap_lba_count)
911{ 911{
912 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; 912 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
913 printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", 913 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
914 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); 914 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
915 return 0; 915 return 0;
916} 916}
@@ -921,7 +921,7 @@ int se_dev_set_max_unmap_block_desc_count(
921{ 921{
922 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 922 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
923 max_unmap_block_desc_count; 923 max_unmap_block_desc_count;
924 printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", 924 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
925 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); 925 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
926 return 0; 926 return 0;
927} 927}
@@ -931,7 +931,7 @@ int se_dev_set_unmap_granularity(
931 u32 unmap_granularity) 931 u32 unmap_granularity)
932{ 932{
933 dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; 933 dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
934 printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", 934 pr_debug("dev[%p]: Set unmap_granularity: %u\n",
935 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); 935 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
936 return 0; 936 return 0;
937} 937}
@@ -941,7 +941,7 @@ int se_dev_set_unmap_granularity_alignment(
941 u32 unmap_granularity_alignment) 941 u32 unmap_granularity_alignment)
942{ 942{
943 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; 943 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
944 printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", 944 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
945 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); 945 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
946 return 0; 946 return 0;
947} 947}
@@ -949,19 +949,19 @@ int se_dev_set_unmap_granularity_alignment(
949int se_dev_set_emulate_dpo(struct se_device *dev, int flag) 949int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
950{ 950{
951 if ((flag != 0) && (flag != 1)) { 951 if ((flag != 0) && (flag != 1)) {
952 printk(KERN_ERR "Illegal value %d\n", flag); 952 pr_err("Illegal value %d\n", flag);
953 return -EINVAL; 953 return -EINVAL;
954 } 954 }
955 if (dev->transport->dpo_emulated == NULL) { 955 if (dev->transport->dpo_emulated == NULL) {
956 printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n"); 956 pr_err("dev->transport->dpo_emulated is NULL\n");
957 return -EINVAL; 957 return -EINVAL;
958 } 958 }
959 if (dev->transport->dpo_emulated(dev) == 0) { 959 if (dev->transport->dpo_emulated(dev) == 0) {
960 printk(KERN_ERR "dev->transport->dpo_emulated not supported\n"); 960 pr_err("dev->transport->dpo_emulated not supported\n");
961 return -EINVAL; 961 return -EINVAL;
962 } 962 }
963 dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; 963 dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
964 printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" 964 pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
965 " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); 965 " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
966 return 0; 966 return 0;
967} 967}
@@ -969,19 +969,19 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
969int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) 969int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
970{ 970{
971 if ((flag != 0) && (flag != 1)) { 971 if ((flag != 0) && (flag != 1)) {
972 printk(KERN_ERR "Illegal value %d\n", flag); 972 pr_err("Illegal value %d\n", flag);
973 return -EINVAL; 973 return -EINVAL;
974 } 974 }
975 if (dev->transport->fua_write_emulated == NULL) { 975 if (dev->transport->fua_write_emulated == NULL) {
976 printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n"); 976 pr_err("dev->transport->fua_write_emulated is NULL\n");
977 return -EINVAL; 977 return -EINVAL;
978 } 978 }
979 if (dev->transport->fua_write_emulated(dev) == 0) { 979 if (dev->transport->fua_write_emulated(dev) == 0) {
980 printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n"); 980 pr_err("dev->transport->fua_write_emulated not supported\n");
981 return -EINVAL; 981 return -EINVAL;
982 } 982 }
983 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; 983 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
984 printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 984 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
985 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); 985 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
986 return 0; 986 return 0;
987} 987}
@@ -989,19 +989,19 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
989int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) 989int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
990{ 990{
991 if ((flag != 0) && (flag != 1)) { 991 if ((flag != 0) && (flag != 1)) {
992 printk(KERN_ERR "Illegal value %d\n", flag); 992 pr_err("Illegal value %d\n", flag);
993 return -EINVAL; 993 return -EINVAL;
994 } 994 }
995 if (dev->transport->fua_read_emulated == NULL) { 995 if (dev->transport->fua_read_emulated == NULL) {
996 printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n"); 996 pr_err("dev->transport->fua_read_emulated is NULL\n");
997 return -EINVAL; 997 return -EINVAL;
998 } 998 }
999 if (dev->transport->fua_read_emulated(dev) == 0) { 999 if (dev->transport->fua_read_emulated(dev) == 0) {
1000 printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n"); 1000 pr_err("dev->transport->fua_read_emulated not supported\n");
1001 return -EINVAL; 1001 return -EINVAL;
1002 } 1002 }
1003 dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; 1003 dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
1004 printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", 1004 pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
1005 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); 1005 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
1006 return 0; 1006 return 0;
1007} 1007}
@@ -1009,19 +1009,19 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
1009int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) 1009int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1010{ 1010{
1011 if ((flag != 0) && (flag != 1)) { 1011 if ((flag != 0) && (flag != 1)) {
1012 printk(KERN_ERR "Illegal value %d\n", flag); 1012 pr_err("Illegal value %d\n", flag);
1013 return -EINVAL; 1013 return -EINVAL;
1014 } 1014 }
1015 if (dev->transport->write_cache_emulated == NULL) { 1015 if (dev->transport->write_cache_emulated == NULL) {
1016 printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n"); 1016 pr_err("dev->transport->write_cache_emulated is NULL\n");
1017 return -EINVAL; 1017 return -EINVAL;
1018 } 1018 }
1019 if (dev->transport->write_cache_emulated(dev) == 0) { 1019 if (dev->transport->write_cache_emulated(dev) == 0) {
1020 printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n"); 1020 pr_err("dev->transport->write_cache_emulated not supported\n");
1021 return -EINVAL; 1021 return -EINVAL;
1022 } 1022 }
1023 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; 1023 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
1024 printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 1024 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1025 dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); 1025 dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
1026 return 0; 1026 return 0;
1027} 1027}
@@ -1029,19 +1029,19 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1029int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) 1029int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1030{ 1030{
1031 if ((flag != 0) && (flag != 1) && (flag != 2)) { 1031 if ((flag != 0) && (flag != 1) && (flag != 2)) {
1032 printk(KERN_ERR "Illegal value %d\n", flag); 1032 pr_err("Illegal value %d\n", flag);
1033 return -EINVAL; 1033 return -EINVAL;
1034 } 1034 }
1035 1035
1036 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1036 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1037 printk(KERN_ERR "dev[%p]: Unable to change SE Device" 1037 pr_err("dev[%p]: Unable to change SE Device"
1038 " UA_INTRLCK_CTRL while dev_export_obj: %d count" 1038 " UA_INTRLCK_CTRL while dev_export_obj: %d count"
1039 " exists\n", dev, 1039 " exists\n", dev,
1040 atomic_read(&dev->dev_export_obj.obj_access_count)); 1040 atomic_read(&dev->dev_export_obj.obj_access_count));
1041 return -EINVAL; 1041 return -EINVAL;
1042 } 1042 }
1043 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; 1043 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
1044 printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", 1044 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1045 dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); 1045 dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
1046 1046
1047 return 0; 1047 return 0;
@@ -1050,18 +1050,18 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1050int se_dev_set_emulate_tas(struct se_device *dev, int flag) 1050int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1051{ 1051{
1052 if ((flag != 0) && (flag != 1)) { 1052 if ((flag != 0) && (flag != 1)) {
1053 printk(KERN_ERR "Illegal value %d\n", flag); 1053 pr_err("Illegal value %d\n", flag);
1054 return -EINVAL; 1054 return -EINVAL;
1055 } 1055 }
1056 1056
1057 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1057 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1058 printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" 1058 pr_err("dev[%p]: Unable to change SE Device TAS while"
1059 " dev_export_obj: %d count exists\n", dev, 1059 " dev_export_obj: %d count exists\n", dev,
1060 atomic_read(&dev->dev_export_obj.obj_access_count)); 1060 atomic_read(&dev->dev_export_obj.obj_access_count));
1061 return -EINVAL; 1061 return -EINVAL;
1062 } 1062 }
1063 dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; 1063 dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
1064 printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", 1064 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1065 dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); 1065 dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
1066 1066
1067 return 0; 1067 return 0;
@@ -1070,20 +1070,20 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1070int se_dev_set_emulate_tpu(struct se_device *dev, int flag) 1070int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1071{ 1071{
1072 if ((flag != 0) && (flag != 1)) { 1072 if ((flag != 0) && (flag != 1)) {
1073 printk(KERN_ERR "Illegal value %d\n", flag); 1073 pr_err("Illegal value %d\n", flag);
1074 return -EINVAL; 1074 return -EINVAL;
1075 } 1075 }
1076 /* 1076 /*
1077 * We expect this value to be non-zero when generic Block Layer 1077 * We expect this value to be non-zero when generic Block Layer
1078 * Discard supported is detected iblock_create_virtdevice(). 1078 * Discard supported is detected iblock_create_virtdevice().
1079 */ 1079 */
1080 if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { 1080 if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1081 printk(KERN_ERR "Generic Block Discard not supported\n"); 1081 pr_err("Generic Block Discard not supported\n");
1082 return -ENOSYS; 1082 return -ENOSYS;
1083 } 1083 }
1084 1084
1085 dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; 1085 dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
1086 printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 1086 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1087 dev, flag); 1087 dev, flag);
1088 return 0; 1088 return 0;
1089} 1089}
@@ -1091,20 +1091,20 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1091int se_dev_set_emulate_tpws(struct se_device *dev, int flag) 1091int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1092{ 1092{
1093 if ((flag != 0) && (flag != 1)) { 1093 if ((flag != 0) && (flag != 1)) {
1094 printk(KERN_ERR "Illegal value %d\n", flag); 1094 pr_err("Illegal value %d\n", flag);
1095 return -EINVAL; 1095 return -EINVAL;
1096 } 1096 }
1097 /* 1097 /*
1098 * We expect this value to be non-zero when generic Block Layer 1098 * We expect this value to be non-zero when generic Block Layer
1099 * Discard supported is detected iblock_create_virtdevice(). 1099 * Discard supported is detected iblock_create_virtdevice().
1100 */ 1100 */
1101 if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { 1101 if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1102 printk(KERN_ERR "Generic Block Discard not supported\n"); 1102 pr_err("Generic Block Discard not supported\n");
1103 return -ENOSYS; 1103 return -ENOSYS;
1104 } 1104 }
1105 1105
1106 dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; 1106 dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
1107 printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 1107 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1108 dev, flag); 1108 dev, flag);
1109 return 0; 1109 return 0;
1110} 1110}
@@ -1112,11 +1112,11 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1112int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1112int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1113{ 1113{
1114 if ((flag != 0) && (flag != 1)) { 1114 if ((flag != 0) && (flag != 1)) {
1115 printk(KERN_ERR "Illegal value %d\n", flag); 1115 pr_err("Illegal value %d\n", flag);
1116 return -EINVAL; 1116 return -EINVAL;
1117 } 1117 }
1118 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; 1118 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
1119 printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 1119 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1120 (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 1120 (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1121 return 0; 1121 return 0;
1122} 1122}
@@ -1141,20 +1141,20 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1141 u32 orig_queue_depth = dev->queue_depth; 1141 u32 orig_queue_depth = dev->queue_depth;
1142 1142
1143 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1143 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1144 printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" 1144 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1145 " dev_export_obj: %d count exists\n", dev, 1145 " dev_export_obj: %d count exists\n", dev,
1146 atomic_read(&dev->dev_export_obj.obj_access_count)); 1146 atomic_read(&dev->dev_export_obj.obj_access_count));
1147 return -EINVAL; 1147 return -EINVAL;
1148 } 1148 }
1149 if (!(queue_depth)) { 1149 if (!queue_depth) {
1150 printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" 1150 pr_err("dev[%p]: Illegal ZERO value for queue"
1151 "_depth\n", dev); 1151 "_depth\n", dev);
1152 return -EINVAL; 1152 return -EINVAL;
1153 } 1153 }
1154 1154
1155 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1155 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1156 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1156 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1157 printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" 1157 pr_err("dev[%p]: Passed queue_depth: %u"
1158 " exceeds TCM/SE_Device TCQ: %u\n", 1158 " exceeds TCM/SE_Device TCQ: %u\n",
1159 dev, queue_depth, 1159 dev, queue_depth,
1160 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1160 dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
@@ -1163,7 +1163,7 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1163 } else { 1163 } else {
1164 if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { 1164 if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
1165 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1165 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1166 printk(KERN_ERR "dev[%p]: Passed queue_depth:" 1166 pr_err("dev[%p]: Passed queue_depth:"
1167 " %u exceeds TCM/SE_Device MAX" 1167 " %u exceeds TCM/SE_Device MAX"
1168 " TCQ: %u\n", dev, queue_depth, 1168 " TCQ: %u\n", dev, queue_depth,
1169 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1169 dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
@@ -1178,7 +1178,7 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1178 else if (queue_depth < orig_queue_depth) 1178 else if (queue_depth < orig_queue_depth)
1179 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); 1179 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
1180 1180
1181 printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", 1181 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1182 dev, queue_depth); 1182 dev, queue_depth);
1183 return 0; 1183 return 0;
1184} 1184}
@@ -1188,41 +1188,41 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1188 int force = 0; /* Force setting for VDEVS */ 1188 int force = 0; /* Force setting for VDEVS */
1189 1189
1190 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1190 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1191 printk(KERN_ERR "dev[%p]: Unable to change SE Device" 1191 pr_err("dev[%p]: Unable to change SE Device"
1192 " max_sectors while dev_export_obj: %d count exists\n", 1192 " max_sectors while dev_export_obj: %d count exists\n",
1193 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1193 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1194 return -EINVAL; 1194 return -EINVAL;
1195 } 1195 }
1196 if (!(max_sectors)) { 1196 if (!max_sectors) {
1197 printk(KERN_ERR "dev[%p]: Illegal ZERO value for" 1197 pr_err("dev[%p]: Illegal ZERO value for"
1198 " max_sectors\n", dev); 1198 " max_sectors\n", dev);
1199 return -EINVAL; 1199 return -EINVAL;
1200 } 1200 }
1201 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { 1201 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1202 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" 1202 pr_err("dev[%p]: Passed max_sectors: %u less than"
1203 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, 1203 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
1204 DA_STATUS_MAX_SECTORS_MIN); 1204 DA_STATUS_MAX_SECTORS_MIN);
1205 return -EINVAL; 1205 return -EINVAL;
1206 } 1206 }
1207 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1207 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1208 if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { 1208 if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
1209 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" 1209 pr_err("dev[%p]: Passed max_sectors: %u"
1210 " greater than TCM/SE_Device max_sectors:" 1210 " greater than TCM/SE_Device max_sectors:"
1211 " %u\n", dev, max_sectors, 1211 " %u\n", dev, max_sectors,
1212 dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1212 dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1213 return -EINVAL; 1213 return -EINVAL;
1214 } 1214 }
1215 } else { 1215 } else {
1216 if (!(force) && (max_sectors > 1216 if (!force && (max_sectors >
1217 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { 1217 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
1218 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" 1218 pr_err("dev[%p]: Passed max_sectors: %u"
1219 " greater than TCM/SE_Device max_sectors" 1219 " greater than TCM/SE_Device max_sectors"
1220 ": %u, use force=1 to override.\n", dev, 1220 ": %u, use force=1 to override.\n", dev,
1221 max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1221 max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1222 return -EINVAL; 1222 return -EINVAL;
1223 } 1223 }
1224 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { 1224 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1225 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" 1225 pr_err("dev[%p]: Passed max_sectors: %u"
1226 " greater than DA_STATUS_MAX_SECTORS_MAX:" 1226 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1227 " %u\n", dev, max_sectors, 1227 " %u\n", dev, max_sectors,
1228 DA_STATUS_MAX_SECTORS_MAX); 1228 DA_STATUS_MAX_SECTORS_MAX);
@@ -1231,7 +1231,7 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1231 } 1231 }
1232 1232
1233 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; 1233 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
1234 printk("dev[%p]: SE Device max_sectors changed to %u\n", 1234 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1235 dev, max_sectors); 1235 dev, max_sectors);
1236 return 0; 1236 return 0;
1237} 1237}
@@ -1239,25 +1239,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1239int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1239int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1240{ 1240{
1241 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1241 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1242 printk(KERN_ERR "dev[%p]: Unable to change SE Device" 1242 pr_err("dev[%p]: Unable to change SE Device"
1243 " optimal_sectors while dev_export_obj: %d count exists\n", 1243 " optimal_sectors while dev_export_obj: %d count exists\n",
1244 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1244 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1245 return -EINVAL; 1245 return -EINVAL;
1246 } 1246 }
1247 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1247 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1248 printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" 1248 pr_err("dev[%p]: Passed optimal_sectors cannot be"
1249 " changed for TCM/pSCSI\n", dev); 1249 " changed for TCM/pSCSI\n", dev);
1250 return -EINVAL; 1250 return -EINVAL;
1251 } 1251 }
1252 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { 1252 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
1253 printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" 1253 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1254 " greater than max_sectors: %u\n", dev, 1254 " greater than max_sectors: %u\n", dev,
1255 optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); 1255 optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
1256 return -EINVAL; 1256 return -EINVAL;
1257 } 1257 }
1258 1258
1259 dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; 1259 dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
1260 printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", 1260 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1261 dev, optimal_sectors); 1261 dev, optimal_sectors);
1262 return 0; 1262 return 0;
1263} 1263}
@@ -1265,7 +1265,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1265int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1265int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1266{ 1266{
1267 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1267 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1268 printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" 1268 pr_err("dev[%p]: Unable to change SE Device block_size"
1269 " while dev_export_obj: %d count exists\n", dev, 1269 " while dev_export_obj: %d count exists\n", dev,
1270 atomic_read(&dev->dev_export_obj.obj_access_count)); 1270 atomic_read(&dev->dev_export_obj.obj_access_count));
1271 return -EINVAL; 1271 return -EINVAL;
@@ -1275,21 +1275,21 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1275 (block_size != 1024) && 1275 (block_size != 1024) &&
1276 (block_size != 2048) && 1276 (block_size != 2048) &&
1277 (block_size != 4096)) { 1277 (block_size != 4096)) {
1278 printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" 1278 pr_err("dev[%p]: Illegal value for block_device: %u"
1279 " for SE device, must be 512, 1024, 2048 or 4096\n", 1279 " for SE device, must be 512, 1024, 2048 or 4096\n",
1280 dev, block_size); 1280 dev, block_size);
1281 return -EINVAL; 1281 return -EINVAL;
1282 } 1282 }
1283 1283
1284 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1284 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1285 printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" 1285 pr_err("dev[%p]: Not allowed to change block_size for"
1286 " Physical Device, use for Linux/SCSI to change" 1286 " Physical Device, use for Linux/SCSI to change"
1287 " block_size for underlying hardware\n", dev); 1287 " block_size for underlying hardware\n", dev);
1288 return -EINVAL; 1288 return -EINVAL;
1289 } 1289 }
1290 1290
1291 dev->se_sub_dev->se_dev_attrib.block_size = block_size; 1291 dev->se_sub_dev->se_dev_attrib.block_size = block_size;
1292 printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", 1292 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1293 dev, block_size); 1293 dev, block_size);
1294 return 0; 1294 return 0;
1295} 1295}
@@ -1304,13 +1304,13 @@ struct se_lun *core_dev_add_lun(
1304 u32 lun_access = 0; 1304 u32 lun_access = 0;
1305 1305
1306 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { 1306 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1307 printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", 1307 pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
1308 atomic_read(&dev->dev_access_obj.obj_access_count)); 1308 atomic_read(&dev->dev_access_obj.obj_access_count));
1309 return NULL; 1309 return NULL;
1310 } 1310 }
1311 1311
1312 lun_p = core_tpg_pre_addlun(tpg, lun); 1312 lun_p = core_tpg_pre_addlun(tpg, lun);
1313 if ((IS_ERR(lun_p)) || !(lun_p)) 1313 if ((IS_ERR(lun_p)) || !lun_p)
1314 return NULL; 1314 return NULL;
1315 1315
1316 if (dev->dev_flags & DF_READ_ONLY) 1316 if (dev->dev_flags & DF_READ_ONLY)
@@ -1321,7 +1321,7 @@ struct se_lun *core_dev_add_lun(
1321 if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) 1321 if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
1322 return NULL; 1322 return NULL;
1323 1323
1324 printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1324 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1325 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1325 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1326 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, 1326 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
1327 tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); 1327 tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
@@ -1357,12 +1357,12 @@ int core_dev_del_lun(
1357 int ret = 0; 1357 int ret = 0;
1358 1358
1359 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); 1359 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
1360 if (!(lun)) 1360 if (!lun)
1361 return ret; 1361 return ret;
1362 1362
1363 core_tpg_post_dellun(tpg, lun); 1363 core_tpg_post_dellun(tpg, lun);
1364 1364
1365 printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" 1365 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1366 " device object\n", tpg->se_tpg_tfo->get_fabric_name(), 1366 " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1367 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, 1367 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
1368 tpg->se_tpg_tfo->get_fabric_name()); 1368 tpg->se_tpg_tfo->get_fabric_name());
@@ -1376,7 +1376,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
1376 1376
1377 spin_lock(&tpg->tpg_lun_lock); 1377 spin_lock(&tpg->tpg_lun_lock);
1378 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1378 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1379 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" 1379 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1380 "_PER_TPG-1: %u for Target Portal Group: %hu\n", 1380 "_PER_TPG-1: %u for Target Portal Group: %hu\n",
1381 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1381 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1382 TRANSPORT_MAX_LUNS_PER_TPG-1, 1382 TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -1387,7 +1387,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
1387 lun = &tpg->tpg_lun_list[unpacked_lun]; 1387 lun = &tpg->tpg_lun_list[unpacked_lun];
1388 1388
1389 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { 1389 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1390 printk(KERN_ERR "%s Logical Unit Number: %u is not free on" 1390 pr_err("%s Logical Unit Number: %u is not free on"
1391 " Target Portal Group: %hu, ignoring request.\n", 1391 " Target Portal Group: %hu, ignoring request.\n",
1392 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1392 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1393 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1393 tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -1409,7 +1409,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
1409 1409
1410 spin_lock(&tpg->tpg_lun_lock); 1410 spin_lock(&tpg->tpg_lun_lock);
1411 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1411 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1412 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" 1412 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1413 "_TPG-1: %u for Target Portal Group: %hu\n", 1413 "_TPG-1: %u for Target Portal Group: %hu\n",
1414 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1414 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1415 TRANSPORT_MAX_LUNS_PER_TPG-1, 1415 TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -1420,7 +1420,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
1420 lun = &tpg->tpg_lun_list[unpacked_lun]; 1420 lun = &tpg->tpg_lun_list[unpacked_lun];
1421 1421
1422 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 1422 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1423 printk(KERN_ERR "%s Logical Unit Number: %u is not active on" 1423 pr_err("%s Logical Unit Number: %u is not active on"
1424 " Target Portal Group: %hu, ignoring request.\n", 1424 " Target Portal Group: %hu, ignoring request.\n",
1425 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1425 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1426 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1426 tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -1442,19 +1442,19 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1442 struct se_node_acl *nacl; 1442 struct se_node_acl *nacl;
1443 1443
1444 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { 1444 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1445 printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", 1445 pr_err("%s InitiatorName exceeds maximum size.\n",
1446 tpg->se_tpg_tfo->get_fabric_name()); 1446 tpg->se_tpg_tfo->get_fabric_name());
1447 *ret = -EOVERFLOW; 1447 *ret = -EOVERFLOW;
1448 return NULL; 1448 return NULL;
1449 } 1449 }
1450 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 1450 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1451 if (!(nacl)) { 1451 if (!nacl) {
1452 *ret = -EINVAL; 1452 *ret = -EINVAL;
1453 return NULL; 1453 return NULL;
1454 } 1454 }
1455 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); 1455 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1456 if (!(lacl)) { 1456 if (!lacl) {
1457 printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); 1457 pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1458 *ret = -ENOMEM; 1458 *ret = -ENOMEM;
1459 return NULL; 1459 return NULL;
1460 } 1460 }
@@ -1477,8 +1477,8 @@ int core_dev_add_initiator_node_lun_acl(
1477 struct se_node_acl *nacl; 1477 struct se_node_acl *nacl;
1478 1478
1479 lun = core_dev_get_lun(tpg, unpacked_lun); 1479 lun = core_dev_get_lun(tpg, unpacked_lun);
1480 if (!(lun)) { 1480 if (!lun) {
1481 printk(KERN_ERR "%s Logical Unit Number: %u is not active on" 1481 pr_err("%s Logical Unit Number: %u is not active on"
1482 " Target Portal Group: %hu, ignoring request.\n", 1482 " Target Portal Group: %hu, ignoring request.\n",
1483 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1483 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1484 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1484 tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -1486,7 +1486,7 @@ int core_dev_add_initiator_node_lun_acl(
1486 } 1486 }
1487 1487
1488 nacl = lacl->se_lun_nacl; 1488 nacl = lacl->se_lun_nacl;
1489 if (!(nacl)) 1489 if (!nacl)
1490 return -EINVAL; 1490 return -EINVAL;
1491 1491
1492 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && 1492 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
@@ -1505,7 +1505,7 @@ int core_dev_add_initiator_node_lun_acl(
1505 smp_mb__after_atomic_inc(); 1505 smp_mb__after_atomic_inc();
1506 spin_unlock(&lun->lun_acl_lock); 1506 spin_unlock(&lun->lun_acl_lock);
1507 1507
1508 printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " 1508 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
1509 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 1509 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1510 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, 1510 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1511 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", 1511 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
@@ -1530,7 +1530,7 @@ int core_dev_del_initiator_node_lun_acl(
1530 struct se_node_acl *nacl; 1530 struct se_node_acl *nacl;
1531 1531
1532 nacl = lacl->se_lun_nacl; 1532 nacl = lacl->se_lun_nacl;
1533 if (!(nacl)) 1533 if (!nacl)
1534 return -EINVAL; 1534 return -EINVAL;
1535 1535
1536 spin_lock(&lun->lun_acl_lock); 1536 spin_lock(&lun->lun_acl_lock);
@@ -1544,7 +1544,7 @@ int core_dev_del_initiator_node_lun_acl(
1544 1544
1545 lacl->se_lun = NULL; 1545 lacl->se_lun = NULL;
1546 1546
1547 printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" 1547 pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1548 " InitiatorNode: %s Mapped LUN: %u\n", 1548 " InitiatorNode: %s Mapped LUN: %u\n",
1549 tpg->se_tpg_tfo->get_fabric_name(), 1549 tpg->se_tpg_tfo->get_fabric_name(),
1550 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 1550 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@@ -1557,7 +1557,7 @@ void core_dev_free_initiator_node_lun_acl(
1557 struct se_portal_group *tpg, 1557 struct se_portal_group *tpg,
1558 struct se_lun_acl *lacl) 1558 struct se_lun_acl *lacl)
1559{ 1559{
1560 printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 1560 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1561 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1561 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1562 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1562 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1563 tpg->se_tpg_tfo->get_fabric_name(), 1563 tpg->se_tpg_tfo->get_fabric_name(),
@@ -1575,7 +1575,7 @@ int core_dev_setup_virtual_lun0(void)
1575 char buf[16]; 1575 char buf[16];
1576 int ret; 1576 int ret;
1577 1577
1578 hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); 1578 hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1579 if (IS_ERR(hba)) 1579 if (IS_ERR(hba))
1580 return PTR_ERR(hba); 1580 return PTR_ERR(hba);
1581 1581
@@ -1583,8 +1583,8 @@ int core_dev_setup_virtual_lun0(void)
1583 t = hba->transport; 1583 t = hba->transport;
1584 1584
1585 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 1585 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1586 if (!(se_dev)) { 1586 if (!se_dev) {
1587 printk(KERN_ERR "Unable to allocate memory for" 1587 pr_err("Unable to allocate memory for"
1588 " struct se_subsystem_dev\n"); 1588 " struct se_subsystem_dev\n");
1589 ret = -ENOMEM; 1589 ret = -ENOMEM;
1590 goto out; 1590 goto out;
@@ -1606,8 +1606,8 @@ int core_dev_setup_virtual_lun0(void)
1606 se_dev->se_dev_hba = hba; 1606 se_dev->se_dev_hba = hba;
1607 1607
1608 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); 1608 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1609 if (!(se_dev->se_dev_su_ptr)) { 1609 if (!se_dev->se_dev_su_ptr) {
1610 printk(KERN_ERR "Unable to locate subsystem dependent pointer" 1610 pr_err("Unable to locate subsystem dependent pointer"
1611 " from allocate_virtdevice()\n"); 1611 " from allocate_virtdevice()\n");
1612 ret = -ENOMEM; 1612 ret = -ENOMEM;
1613 goto out; 1613 goto out;
@@ -1643,7 +1643,7 @@ void core_dev_release_virtual_lun0(void)
1643 struct se_hba *hba = lun0_hba; 1643 struct se_hba *hba = lun0_hba;
1644 struct se_subsystem_dev *su_dev = lun0_su_dev; 1644 struct se_subsystem_dev *su_dev = lun0_su_dev;
1645 1645
1646 if (!(hba)) 1646 if (!hba)
1647 return; 1647 return;
1648 1648
1649 if (g_lun0_dev) 1649 if (g_lun0_dev)