author     Andy Grover <agrover@redhat.com>  2011-07-19 04:55:10 -0400
committer  Nicholas Bellinger <nab@linux-iscsi.org>  2011-07-22 05:37:43 -0400
commit     e3d6f909ed803d92a5ac9b4a2c087e0eae9b90d0
tree       2eb65e958a2cc35c896a0e184ec09edcb9076b3b /drivers/target/target_core_device.c
parent     a8c6da90b823fb94ca76ca0df6bb44e6e205dc87
target: Core cleanups from AGrover (round 1)
This patch contains the squashed version of a number of cleanups and minor
fixes from Andy's initial series (round 1) for target core this past
spring. The condensed log looks like:

target: use errno values instead of returning -1 for everything
target: Rename transport_calc_sg_num to transport_init_task_sg
target: Fix leak in error path in transport_init_task_sg
target/pscsi: Remove pscsi_get_sh() usage
target: Make two runtime checks into WARN_ONs
target: Remove hba queue depth and convert to spin_lock_irq usage
target: dev->dev_status_queue_obj is unused
target: Make struct se_queue_req.cmd type struct se_cmd *
target: Remove __transport_get_qr_from_queue()
target: Rename se_dev->g_se_dev_list to se_dev_node
target: Remove struct se_global
target: Simplify scsi mib index table code
target: Make dev_queue_obj a member of se_device instead of a pointer
target: remove extraneous returns at end of void functions
target: Ensure transport_dump_vpd_ident_type returns null-terminated str
target: Function pointers don't need to use '&' to be assigned
target: Fix comment in __transport_execute_tasks()
target: Misc style cleanups
target: rename struct pr_reservation_template to pr_reservation
target: Remove #defines that just perform indirection
target: Inline transport_get_task_from_execute_queue()
target: Minor header comment fixes

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
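The first item above sets the tone for most of the mechanical changes in
this file: error paths now return a specific negative errno instead of a
bare -1, and pointer-returning helpers encode the errno with ERR_PTR() so
callers can recover it via IS_ERR()/PTR_ERR(). A minimal sketch of the
convention, using a hypothetical alloc_widget() helper rather than code
from this patch:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct widget { int id; };

    /* On failure, encode the errno in the returned pointer instead of
     * handing back a bare NULL, so callers can distinguish -ENOMEM from
     * other failures. */
    static struct widget *alloc_widget(void)
    {
            struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

            if (!w)
                    return ERR_PTR(-ENOMEM);        /* was: return NULL; */
            return w;
    }

    static int use_widget(void)
    {
            struct widget *w = alloc_widget();

            if (IS_ERR(w))
                    return PTR_ERR(w);              /* was: return -1; */
            /* ... use w ... */
            kfree(w);
            return 0;
    }

This is exactly the shape of the core_alloc_port()/core_dev_export() change
in the diff below.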
Diffstat (limited to 'drivers/target/target_core_device.c')
-rw-r--r--  drivers/target/target_core_device.c | 440
1 file changed, 216 insertions(+), 224 deletions(-)
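Most of the 440 changed lines below fall out of "Remove #defines that just
perform indirection": wrapper macros such as SE_SESS(), TPG_TFO(),
TRANSPORT() and DEV_ATTRIB() hid a single pointer dereference, and each
use is replaced by the dereference itself. A minimal sketch of the
pattern, with simplified stand-in structs rather than the real (much
larger) TCM definitions:

    /* Simplified stand-ins for the real TCM structs. */
    struct se_node_acl { int dummy; };
    struct se_session  { struct se_node_acl *se_node_acl; };
    struct se_cmd      { struct se_session *se_sess; };

    /* Before: a #define that only performs indirection ... */
    #define SE_SESS(cmd)    ((cmd)->se_sess)

    /* ... so callers wrote: */
    static struct se_node_acl *nacl_before(struct se_cmd *cmd)
    {
            return SE_SESS(cmd)->se_node_acl;
    }

    /* After: the macro is dropped and the dereference is spelled out,
     * which is just as short and much easier to grep. */
    static struct se_node_acl *nacl_after(struct se_cmd *cmd)
    {
            return cmd->se_sess->se_node_acl;
    }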
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e76ffc5b2079..fd923854505c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  * Filename: target_core_device.c (based on iscsi_target_device.c)
  *
- * This file contains the iSCSI Virtual Device and Disk Transport
+ * This file contains the TCM Virtual Device and Disk Transport
  * agnostic related functions.
  *
  * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
@@ -54,25 +54,30 @@
 static void se_dev_start(struct se_device *dev);
 static void se_dev_stop(struct se_device *dev);
 
+static struct se_hba *lun0_hba;
+static struct se_subsystem_dev *lun0_su_dev;
+/* not static, needed by tpg.c */
+struct se_device *g_lun0_dev;
+
 int transport_get_lun_for_cmd(
         struct se_cmd *se_cmd,
         u32 unpacked_lun)
 {
         struct se_dev_entry *deve;
         struct se_lun *se_lun = NULL;
-        struct se_session *se_sess = SE_SESS(se_cmd);
+        struct se_session *se_sess = se_cmd->se_sess;
         unsigned long flags;
         int read_only = 0;
 
         if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
                 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                return -1;
+                return -ENODEV;
         }
 
-        spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+        spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
         deve = se_cmd->se_deve =
-                &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+                &se_sess->se_node_acl->device_list[unpacked_lun];
         if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                 if (se_cmd) {
                         deve->total_cmds++;
@@ -95,11 +100,11 @@ int transport_get_lun_for_cmd
                 se_lun = se_cmd->se_lun = deve->se_lun;
                 se_cmd->pr_res_key = deve->pr_res_key;
                 se_cmd->orig_fe_lun = unpacked_lun;
-                se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+                se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
         }
 out:
-        spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+        spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
 
         if (!se_lun) {
                 if (read_only) {
@@ -107,9 +112,9 @@ out:
                         se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                         printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                 " Access for 0x%08x\n",
-                                CMD_TFO(se_cmd)->get_fabric_name(),
+                                se_cmd->se_tfo->get_fabric_name(),
                                 unpacked_lun);
-                        return -1;
+                        return -EACCES;
                 } else {
                         /*
                          * Use the se_portal_group->tpg_virt_lun0 to allow for
@@ -121,9 +126,9 @@ out:
                                 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                                 printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                         " Access for 0x%08x\n",
-                                        CMD_TFO(se_cmd)->get_fabric_name(),
+                                        se_cmd->se_tfo->get_fabric_name(),
                                         unpacked_lun);
-                                return -1;
+                                return -ENODEV;
                         }
                         /*
                          * Force WRITE PROTECT for virtual LUN 0
@@ -132,15 +137,15 @@ out:
                             (se_cmd->data_direction != DMA_NONE)) {
                                 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
                                 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                                return -1;
+                                return -EACCES;
                         }
 #if 0
                         printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
-                                CMD_TFO(se_cmd)->get_fabric_name());
+                                se_cmd->se_tfo->get_fabric_name());
 #endif
                         se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                         se_cmd->orig_fe_lun = 0;
-                        se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+                        se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                         se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                 }
         }
@@ -151,7 +156,7 @@ out:
         if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
                 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                return -1;
+                return -ENODEV;
         }
 
         {
@@ -171,10 +176,10 @@ out:
          */
         spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
         list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
-        atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
+        atomic_set(&se_cmd->t_task->transport_lun_active, 1);
 #if 0
         printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
-                CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
+                se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun);
 #endif
         spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
 
@@ -189,35 +194,35 @@ int transport_get_lun_for_tmr(
         struct se_device *dev = NULL;
         struct se_dev_entry *deve;
         struct se_lun *se_lun = NULL;
-        struct se_session *se_sess = SE_SESS(se_cmd);
+        struct se_session *se_sess = se_cmd->se_sess;
         struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 
         if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
                 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                return -1;
+                return -ENODEV;
         }
 
-        spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+        spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
         deve = se_cmd->se_deve =
-                &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+                &se_sess->se_node_acl->device_list[unpacked_lun];
         if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                 se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
                 dev = se_lun->lun_se_dev;
                 se_cmd->pr_res_key = deve->pr_res_key;
                 se_cmd->orig_fe_lun = unpacked_lun;
-                se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+                se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
 /*              se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
         }
-        spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+        spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
 
         if (!se_lun) {
                 printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                         " Access for 0x%08x\n",
-                        CMD_TFO(se_cmd)->get_fabric_name(),
+                        se_cmd->se_tfo->get_fabric_name(),
                         unpacked_lun);
                 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                return -1;
+                return -ENODEV;
         }
         /*
          * Determine if the struct se_lun is online.
@@ -225,7 +230,7 @@ int transport_get_lun_for_tmr(
 /* #warning FIXME: Check for LUN_RESET + UNIT Attention */
         if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
                 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                return -1;
+                return -ENODEV;
         }
         se_tmr->tmr_dev = dev;
 
@@ -263,14 +268,14 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
                 if (!(lun)) {
                         printk(KERN_ERR "%s device entries device pointer is"
                                 " NULL, but Initiator has access.\n",
-                                TPG_TFO(tpg)->get_fabric_name());
+                                tpg->se_tpg_tfo->get_fabric_name());
                         continue;
                 }
                 port = lun->lun_sep;
                 if (!(port)) {
                         printk(KERN_ERR "%s device entries device pointer is"
                                 " NULL, but Initiator has access.\n",
-                                TPG_TFO(tpg)->get_fabric_name());
+                                tpg->se_tpg_tfo->get_fabric_name());
                         continue;
                 }
                 if (port->sep_rtpi != rtpi)
@@ -308,7 +313,7 @@ int core_free_device_list_for_node(
                 if (!deve->se_lun) {
                         printk(KERN_ERR "%s device entries device pointer is"
                                 " NULL, but Initiator has access.\n",
-                                TPG_TFO(tpg)->get_fabric_name());
+                                tpg->se_tpg_tfo->get_fabric_name());
                         continue;
                 }
                 lun = deve->se_lun;
@@ -334,8 +339,6 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
         deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
         deve->deve_cmds--;
         spin_unlock_irq(&se_nacl->device_list_lock);
-
-        return;
 }
 
 void core_update_device_list_access(
@@ -355,8 +358,6 @@ void core_update_device_list_access(
                 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
         }
         spin_unlock_irq(&nacl->device_list_lock);
-
-        return;
 }
 
 /* core_update_device_list_for_node():
@@ -408,14 +409,14 @@ int core_update_device_list_for_node(
                                 " already set for demo mode -> explict"
                                 " LUN ACL transition\n");
                         spin_unlock_irq(&nacl->device_list_lock);
-                        return -1;
+                        return -EINVAL;
                 }
                 if (deve->se_lun != lun) {
                         printk(KERN_ERR "struct se_dev_entry->se_lun does"
                                 " match passed struct se_lun for demo mode"
                                 " -> explict LUN ACL transition\n");
                         spin_unlock_irq(&nacl->device_list_lock);
-                        return -1;
+                        return -EINVAL;
                 }
                 deve->se_lun_acl = lun_acl;
                 trans = 1;
@@ -503,8 +504,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
                 spin_lock_bh(&tpg->acl_node_lock);
         }
         spin_unlock_bh(&tpg->acl_node_lock);
-
-        return;
 }
 
 static struct se_port *core_alloc_port(struct se_device *dev)
@@ -514,7 +513,7 @@ static struct se_port *core_alloc_port(struct se_device *dev)
         port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
         if (!(port)) {
                 printk(KERN_ERR "Unable to allocate struct se_port\n");
-                return NULL;
+                return ERR_PTR(-ENOMEM);
         }
         INIT_LIST_HEAD(&port->sep_alua_list);
         INIT_LIST_HEAD(&port->sep_list);
@@ -527,7 +526,7 @@ static struct se_port *core_alloc_port(struct se_device *dev)
                 printk(KERN_WARNING "Reached dev->dev_port_count =="
                         " 0x0000ffff\n");
                 spin_unlock(&dev->se_port_lock);
-                return NULL;
+                return ERR_PTR(-ENOSPC);
         }
 again:
         /*
@@ -565,7 +564,7 @@ static void core_export_port(
         struct se_port *port,
         struct se_lun *lun)
 {
-        struct se_subsystem_dev *su_dev = SU_DEV(dev);
+        struct se_subsystem_dev *su_dev = dev->se_sub_dev;
         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
 
         spin_lock(&dev->se_port_lock);
@@ -578,7 +577,7 @@ static void core_export_port(
         list_add_tail(&port->sep_list, &dev->dev_sep_list);
         spin_unlock(&dev->se_port_lock);
 
-        if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
+        if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
                 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
                         printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
@@ -587,11 +586,11 @@ static void core_export_port(
                 }
                 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-                                T10_ALUA(su_dev)->default_tg_pt_gp);
+                                su_dev->t10_alua.default_tg_pt_gp);
                 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                 printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
                         " Group: alua/default_tg_pt_gp\n",
-                        TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
+                        dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
         }
 
         dev->dev_port_count++;
@@ -618,8 +617,6 @@ static void core_release_port(struct se_device *dev, struct se_port *port)
         list_del(&port->sep_list);
         dev->dev_port_count--;
         kfree(port);
-
-        return;
 }
 
 int core_dev_export(
@@ -630,8 +627,8 @@ int core_dev_export(
         struct se_port *port;
 
         port = core_alloc_port(dev);
-        if (!(port))
-                return -1;
+        if (IS_ERR(port))
+                return PTR_ERR(port);
 
         lun->lun_se_dev = dev;
         se_dev_start(dev);
@@ -668,12 +665,12 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
 {
         struct se_dev_entry *deve;
         struct se_lun *se_lun;
-        struct se_session *se_sess = SE_SESS(se_cmd);
+        struct se_session *se_sess = se_cmd->se_sess;
         struct se_task *se_task;
-        unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
+        unsigned char *buf = se_cmd->t_task->t_task_buf;
         u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 
-        list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
+        list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list)
                 break;
 
         if (!(se_task)) {
@@ -692,9 +689,9 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
                 goto done;
         }
 
-        spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+        spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-                deve = &SE_NODE_ACL(se_sess)->device_list[i];
+                deve = &se_sess->se_node_acl->device_list[i];
                 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                         continue;
                 se_lun = deve->se_lun;
@@ -711,7 +708,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
                 offset += 8;
                 cdb_offset += 8;
         }
-        spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+        spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
 
         /*
          * See SPC3 r07, page 159.
@@ -755,26 +752,20 @@ void se_release_device_for_hba(struct se_device *dev)
         core_scsi3_free_all_registrations(dev);
         se_release_vpd_for_dev(dev);
 
-        kfree(dev->dev_status_queue_obj);
-        kfree(dev->dev_queue_obj);
         kfree(dev);
-
-        return;
 }
 
 void se_release_vpd_for_dev(struct se_device *dev)
 {
         struct t10_vpd *vpd, *vpd_tmp;
 
-        spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+        spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
         list_for_each_entry_safe(vpd, vpd_tmp,
-                        &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
+                        &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
                 list_del(&vpd->vpd_list);
                 kfree(vpd);
         }
-        spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
-
-        return;
+        spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
 }
 
 /* se_free_virtual_device():
@@ -860,48 +851,48 @@ void se_dev_set_default_attribs(
 {
         struct queue_limits *limits = &dev_limits->limits;
 
-        DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
-        DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
-        DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
-        DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
-        DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
-        DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
-        DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
-        DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
-        DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
-        DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
-        DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+        dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+        dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+        dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+        dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+        dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+        dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
+        dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+        dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+        dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
+        dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
+        dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
         /*
          * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
          * iblock_create_virtdevice() from struct queue_limits values
          * if blk_queue_discard()==1
          */
-        DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
-        DEV_ATTRIB(dev)->max_unmap_block_desc_count =
+        dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+        dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
                 DA_MAX_UNMAP_BLOCK_DESC_COUNT;
-        DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
-        DEV_ATTRIB(dev)->unmap_granularity_alignment =
+        dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+        dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
                 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
         /*
          * block_size is based on subsystem plugin dependent requirements.
         */
-        DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
-        DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
+        dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
+        dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
         /*
          * max_sectors is based on subsystem plugin dependent requirements.
          */
-        DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
-        DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
+        dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+        dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
         /*
          * Set optimal_sectors from max_sectors, which can be lowered via
          * configfs.
          */
-        DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
+        dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
         /*
          * queue_depth is based on subsystem plugin dependent requirements.
          */
-        DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
-        DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
+        dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
+        dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
 }
 
 int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
@@ -909,9 +900,9 @@ int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
         if (task_timeout > DA_TASK_TIMEOUT_MAX) {
                 printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
                         " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
-                return -1;
+                return -EINVAL;
         } else {
-                DEV_ATTRIB(dev)->task_timeout = task_timeout;
+                dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
                 printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
                         dev, task_timeout);
         }
@@ -923,9 +914,9 @@ int se_dev_set_max_unmap_lba_count(
         struct se_device *dev,
         u32 max_unmap_lba_count)
 {
-        DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
+        dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
         printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
-                dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
+                dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
         return 0;
 }
 
@@ -933,9 +924,10 @@ int se_dev_set_max_unmap_block_desc_count(
         struct se_device *dev,
         u32 max_unmap_block_desc_count)
 {
-        DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
+        dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+                max_unmap_block_desc_count;
         printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
-                dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
+                dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
         return 0;
 }
 
@@ -943,9 +935,9 @@ int se_dev_set_unmap_granularity(
         struct se_device *dev,
         u32 unmap_granularity)
 {
-        DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
+        dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
         printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
-                dev, DEV_ATTRIB(dev)->unmap_granularity);
+                dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
         return 0;
 }
 
@@ -953,9 +945,9 @@ int se_dev_set_unmap_granularity_alignment(
         struct se_device *dev,
         u32 unmap_granularity_alignment)
 {
-        DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
+        dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
         printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
-                dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
+                dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
         return 0;
 }
 
@@ -963,19 +955,19 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 {
         if ((flag != 0) && (flag != 1)) {
                 printk(KERN_ERR "Illegal value %d\n", flag);
-                return -1;
+                return -EINVAL;
         }
-        if (TRANSPORT(dev)->dpo_emulated == NULL) {
-                printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
-                return -1;
+        if (dev->transport->dpo_emulated == NULL) {
+                printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n");
+                return -EINVAL;
         }
-        if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
-                printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
-                return -1;
+        if (dev->transport->dpo_emulated(dev) == 0) {
+                printk(KERN_ERR "dev->transport->dpo_emulated not supported\n");
+                return -EINVAL;
         }
-        DEV_ATTRIB(dev)->emulate_dpo = flag;
+        dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
         printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
-                " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
+                " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
         return 0;
 }
 
@@ -983,19 +975,19 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 {
         if ((flag != 0) && (flag != 1)) {
                 printk(KERN_ERR "Illegal value %d\n", flag);
-                return -1;
+                return -EINVAL;
         }
-        if (TRANSPORT(dev)->fua_write_emulated == NULL) {
-                printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
-                return -1;
+        if (dev->transport->fua_write_emulated == NULL) {
+                printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n");
+                return -EINVAL;
         }
-        if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
-                printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
-                return -1;
+        if (dev->transport->fua_write_emulated(dev) == 0) {
+                printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n");
+                return -EINVAL;
         }
-        DEV_ATTRIB(dev)->emulate_fua_write = flag;
+        dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
         printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
-                dev, DEV_ATTRIB(dev)->emulate_fua_write);
+                dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
         return 0;
 }
 
@@ -1003,19 +995,19 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 {
         if ((flag != 0) && (flag != 1)) {
                 printk(KERN_ERR "Illegal value %d\n", flag);
-                return -1;
+                return -EINVAL;
         }
-        if (TRANSPORT(dev)->fua_read_emulated == NULL) {
-                printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
-                return -1;
+        if (dev->transport->fua_read_emulated == NULL) {
+                printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n");
+                return -EINVAL;
         }
-        if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
-                printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
-                return -1;
+        if (dev->transport->fua_read_emulated(dev) == 0) {
+                printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n");
+                return -EINVAL;
         }
-        DEV_ATTRIB(dev)->emulate_fua_read = flag;
+        dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
         printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
-                dev, DEV_ATTRIB(dev)->emulate_fua_read);
+                dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
         return 0;
 }
 
@@ -1023,19 +1015,19 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 {
         if ((flag != 0) && (flag != 1)) {
                 printk(KERN_ERR "Illegal value %d\n", flag);
-                return -1;
+                return -EINVAL;
         }
-        if (TRANSPORT(dev)->write_cache_emulated == NULL) {
-                printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
-                return -1;
+        if (dev->transport->write_cache_emulated == NULL) {
+                printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n");
+                return -EINVAL;
         }
-        if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
-                printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
-                return -1;
+        if (dev->transport->write_cache_emulated(dev) == 0) {
+                printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n");
+                return -EINVAL;
         }
-        DEV_ATTRIB(dev)->emulate_write_cache = flag;
+        dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
         printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
-                dev, DEV_ATTRIB(dev)->emulate_write_cache);
+                dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
         return 0;
 }
 
@@ -1043,7 +1035,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
 {
         if ((flag != 0) && (flag != 1) && (flag != 2)) {
                 printk(KERN_ERR "Illegal value %d\n", flag);
-                return -1;
+                return -EINVAL;
         }
 
         if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
@@ -1051,11 +1043,11 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
                         " UA_INTRLCK_CTRL while dev_export_obj: %d count"
                         " exists\n", dev,
                         atomic_read(&dev->dev_export_obj.obj_access_count));
-                return -1;
+                return -EINVAL;
         }
-        DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
+        dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
         printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
-                dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
+                dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
 
         return 0;
 }
@@ -1064,18 +1056,18 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
 {
         if ((flag != 0) && (flag != 1)) {
                 printk(KERN_ERR "Illegal value %d\n", flag);
-                return -1;
+                return -EINVAL;
         }
 
         if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                 printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
                         " dev_export_obj: %d count exists\n", dev,
                         atomic_read(&dev->dev_export_obj.obj_access_count));
-                return -1;
+                return -EINVAL;
         }
-        DEV_ATTRIB(dev)->emulate_tas = flag;
+        dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
         printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
-                dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
+                dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
 
         return 0;
 }
@@ -1084,18 +1076,18 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
 {
         if ((flag != 0) && (flag != 1)) {
                 printk(KERN_ERR "Illegal value %d\n", flag);
-                return -1;
+                return -EINVAL;
         }
         /*
          * We expect this value to be non-zero when generic Block Layer
          * Discard supported is detected iblock_create_virtdevice().
          */
-        if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+        if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
                 printk(KERN_ERR "Generic Block Discard not supported\n");
                 return -ENOSYS;
         }
 
-        DEV_ATTRIB(dev)->emulate_tpu = flag;
+        dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
         printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
                 dev, flag);
         return 0;
@@ -1105,18 +1097,18 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
 {
         if ((flag != 0) && (flag != 1)) {
                 printk(KERN_ERR "Illegal value %d\n", flag);
-                return -1;
+                return -EINVAL;
         }
         /*
          * We expect this value to be non-zero when generic Block Layer
          * Discard supported is detected iblock_create_virtdevice().
          */
-        if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+        if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
                 printk(KERN_ERR "Generic Block Discard not supported\n");
                 return -ENOSYS;
         }
 
-        DEV_ATTRIB(dev)->emulate_tpws = flag;
+        dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
         printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
                 dev, flag);
         return 0;
@@ -1126,11 +1118,11 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
 {
         if ((flag != 0) && (flag != 1)) {
                 printk(KERN_ERR "Illegal value %d\n", flag);
-                return -1;
+                return -EINVAL;
         }
-        DEV_ATTRIB(dev)->enforce_pr_isids = flag;
+        dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
         printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
-                (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
+                (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
         return 0;
 }
 
@@ -1145,35 +1137,35 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
                 printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
                         " dev_export_obj: %d count exists\n", dev,
                         atomic_read(&dev->dev_export_obj.obj_access_count));
-                return -1;
+                return -EINVAL;
         }
         if (!(queue_depth)) {
                 printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
                         "_depth\n", dev);
-                return -1;
+                return -EINVAL;
         }
 
-        if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-                if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+                if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
                         printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
                                 " exceeds TCM/SE_Device TCQ: %u\n",
                                 dev, queue_depth,
-                                DEV_ATTRIB(dev)->hw_queue_depth);
-                        return -1;
+                                dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+                        return -EINVAL;
                 }
         } else {
-                if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
-                        if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+                if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
+                        if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
                                 printk(KERN_ERR "dev[%p]: Passed queue_depth:"
                                         " %u exceeds TCM/SE_Device MAX"
                                         " TCQ: %u\n", dev, queue_depth,
-                                        DEV_ATTRIB(dev)->hw_queue_depth);
-                                return -1;
+                                        dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+                                return -EINVAL;
                         }
                 }
         }
 
-        DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
+        dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
         if (queue_depth > orig_queue_depth)
                 atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
         else if (queue_depth < orig_queue_depth)
@@ -1192,46 +1184,46 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
                 printk(KERN_ERR "dev[%p]: Unable to change SE Device"
                         " max_sectors while dev_export_obj: %d count exists\n",
                         dev, atomic_read(&dev->dev_export_obj.obj_access_count));
-                return -1;
+                return -EINVAL;
         }
         if (!(max_sectors)) {
                 printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
                         " max_sectors\n", dev);
-                return -1;
+                return -EINVAL;
         }
         if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
                 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
                         " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
                         DA_STATUS_MAX_SECTORS_MIN);
-                return -1;
+                return -EINVAL;
         }
-        if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-                if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
+        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+                if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
                         printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
                                 " greater than TCM/SE_Device max_sectors:"
                                 " %u\n", dev, max_sectors,
-                                DEV_ATTRIB(dev)->hw_max_sectors);
-                        return -1;
+                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+                        return -EINVAL;
                 }
         } else {
                 if (!(force) && (max_sectors >
-                                DEV_ATTRIB(dev)->hw_max_sectors)) {
+                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
                         printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
                                 " greater than TCM/SE_Device max_sectors"
                                 ": %u, use force=1 to override.\n", dev,
-                                max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
-                        return -1;
+                                max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+                        return -EINVAL;
                 }
                 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
                         printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
                                 " greater than DA_STATUS_MAX_SECTORS_MAX:"
                                 " %u\n", dev, max_sectors,
                                 DA_STATUS_MAX_SECTORS_MAX);
-                        return -1;
+                        return -EINVAL;
                 }
         }
 
-        DEV_ATTRIB(dev)->max_sectors = max_sectors;
+        dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
         printk("dev[%p]: SE Device max_sectors changed to %u\n",
                 dev, max_sectors);
         return 0;
@@ -1245,19 +1237,19 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                         dev, atomic_read(&dev->dev_export_obj.obj_access_count));
                 return -EINVAL;
         }
-        if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                 printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
                         " changed for TCM/pSCSI\n", dev);
                 return -EINVAL;
         }
-        if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
+        if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
                 printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
                         " greater than max_sectors: %u\n", dev,
-                        optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
+                        optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
                 return -EINVAL;
         }
 
-        DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
+        dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
         printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
                 dev, optimal_sectors);
         return 0;
@@ -1269,7 +1261,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
                 printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
                         " while dev_export_obj: %d count exists\n", dev,
                         atomic_read(&dev->dev_export_obj.obj_access_count));
-                return -1;
+                return -EINVAL;
         }
 
         if ((block_size != 512) &&
@@ -1279,17 +1271,17 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
                 printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
                         " for SE device, must be 512, 1024, 2048 or 4096\n",
                         dev, block_size);
-                return -1;
+                return -EINVAL;
         }
 
-        if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                 printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
                         " Physical Device, use for Linux/SCSI to change"
                         " block_size for underlying hardware\n", dev);
-                return -1;
+                return -EINVAL;
         }
 
-        DEV_ATTRIB(dev)->block_size = block_size;
+        dev->se_sub_dev->se_dev_attrib.block_size = block_size;
         printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
                 dev, block_size);
         return 0;
@@ -1323,14 +1315,14 @@ struct se_lun *core_dev_add_lun(
                 return NULL;
 
         printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
-                " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
-                TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
-                TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
+                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
+                tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
         /*
          * Update LUN maps for dynamically added initiators when
          * generate_node_acl is enabled.
          */
-        if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
+        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                 struct se_node_acl *acl;
                 spin_lock_bh(&tpg->acl_node_lock);
                 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
@@ -1364,9 +1356,9 @@ int core_dev_del_lun(
         core_tpg_post_dellun(tpg, lun);
 
         printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
-                " device object\n", TPG_TFO(tpg)->get_fabric_name(),
-                TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
-                TPG_TFO(tpg)->get_fabric_name());
+                " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
+                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
+                tpg->se_tpg_tfo->get_fabric_name());
 
         return 0;
 }
@@ -1379,9 +1371,9 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
         if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
                         "_PER_TPG-1: %u for Target Portal Group: %hu\n",
-                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                         TRANSPORT_MAX_LUNS_PER_TPG-1,
-                        TPG_TFO(tpg)->tpg_get_tag(tpg));
+                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                 spin_unlock(&tpg->tpg_lun_lock);
                 return NULL;
         }
@@ -1390,8 +1382,8 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
         if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
                 printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
                         " Target Portal Group: %hu, ignoring request.\n",
-                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
-                        TPG_TFO(tpg)->tpg_get_tag(tpg));
+                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                 spin_unlock(&tpg->tpg_lun_lock);
                 return NULL;
         }
@@ -1412,9 +1404,9 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
         if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
                         "_TPG-1: %u for Target Portal Group: %hu\n",
-                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                         TRANSPORT_MAX_LUNS_PER_TPG-1,
-                        TPG_TFO(tpg)->tpg_get_tag(tpg));
+                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                 spin_unlock(&tpg->tpg_lun_lock);
                 return NULL;
         }
@@ -1423,8 +1415,8 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
         if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                 printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                         " Target Portal Group: %hu, ignoring request.\n",
-                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
-                        TPG_TFO(tpg)->tpg_get_tag(tpg));
+                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                 spin_unlock(&tpg->tpg_lun_lock);
                 return NULL;
         }
@@ -1444,7 +1436,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 
         if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
                 printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
-                        TPG_TFO(tpg)->get_fabric_name());
+                        tpg->se_tpg_tfo->get_fabric_name());
                 *ret = -EOVERFLOW;
                 return NULL;
         }
@@ -1481,8 +1473,8 @@ int core_dev_add_initiator_node_lun_acl(
         if (!(lun)) {
                 printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                         " Target Portal Group: %hu, ignoring request.\n",
-                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
-                        TPG_TFO(tpg)->tpg_get_tag(tpg));
+                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                 return -EINVAL;
         }
 
@@ -1507,8 +1499,8 @@ int core_dev_add_initiator_node_lun_acl(
         spin_unlock(&lun->lun_acl_lock);
 
         printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
-                " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
-                TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+                " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
                 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
                 lacl->initiatorname);
         /*
@@ -1547,8 +1539,8 @@ int core_dev_del_initiator_node_lun_acl(
 
         printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
                 " InitiatorNode: %s Mapped LUN: %u\n",
-                TPG_TFO(tpg)->get_fabric_name(),
-                TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+                tpg->se_tpg_tfo->get_fabric_name(),
+                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                 lacl->initiatorname, lacl->mapped_lun);
 
         return 0;
@@ -1559,9 +1551,9 @@ void core_dev_free_initiator_node_lun_acl(
         struct se_lun_acl *lacl)
 {
         printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
-                " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
-                TPG_TFO(tpg)->tpg_get_tag(tpg),
-                TPG_TFO(tpg)->get_fabric_name(),
+                " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+                tpg->se_tpg_tfo->tpg_get_tag(tpg),
+                tpg->se_tpg_tfo->get_fabric_name(),
                 lacl->initiatorname, lacl->mapped_lun);
 
         kfree(lacl);
@@ -1580,7 +1572,7 @@ int core_dev_setup_virtual_lun0(void)
         if (IS_ERR(hba))
                 return PTR_ERR(hba);
 
-        se_global->g_lun0_hba = hba;
+        lun0_hba = hba;
         t = hba->transport;
 
         se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
@@ -1590,17 +1582,17 @@ int core_dev_setup_virtual_lun0(void)
                 ret = -ENOMEM;
                 goto out;
         }
-        INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+        INIT_LIST_HEAD(&se_dev->se_dev_node);
         INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
         spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
-        INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
-        INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
-        spin_lock_init(&se_dev->t10_reservation.registration_lock);
-        spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
+        INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
+        spin_lock_init(&se_dev->t10_pr.registration_lock);
+        spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
         INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
         spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
         spin_lock_init(&se_dev->se_dev_lock);
-        se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+        se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
         se_dev->t10_wwn.t10_sub_dev = se_dev;
         se_dev->t10_alua.t10_sub_dev = se_dev;
         se_dev->se_dev_attrib.da_sub_dev = se_dev;
@@ -1613,27 +1605,27 @@ int core_dev_setup_virtual_lun0(void)
                 ret = -ENOMEM;
                 goto out;
         }
-        se_global->g_lun0_su_dev = se_dev;
+        lun0_su_dev = se_dev;
 
         memset(buf, 0, 16);
         sprintf(buf, "rd_pages=8");
         t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
 
         dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
-        if (!(dev) || IS_ERR(dev)) {
-                ret = -ENOMEM;
+        if (IS_ERR(dev)) {
+                ret = PTR_ERR(dev);
                 goto out;
         }
         se_dev->se_dev_ptr = dev;
-        se_global->g_lun0_dev = dev;
+        g_lun0_dev = dev;
 
         return 0;
 out:
-        se_global->g_lun0_su_dev = NULL;
+        lun0_su_dev = NULL;
         kfree(se_dev);
-        if (se_global->g_lun0_hba) {
-                core_delete_hba(se_global->g_lun0_hba);
-                se_global->g_lun0_hba = NULL;
+        if (lun0_hba) {
+                core_delete_hba(lun0_hba);
+                lun0_hba = NULL;
         }
         return ret;
 }
@@ -1641,14 +1633,14 @@ out:
 
 void core_dev_release_virtual_lun0(void)
 {
-        struct se_hba *hba = se_global->g_lun0_hba;
-        struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
+        struct se_hba *hba = lun0_hba;
+        struct se_subsystem_dev *su_dev = lun0_su_dev;
 
         if (!(hba))
                 return;
 
-        if (se_global->g_lun0_dev)
-                se_free_virtual_device(se_global->g_lun0_dev, hba);
+        if (g_lun0_dev)
+                se_free_virtual_device(g_lun0_dev, hba);
 
         kfree(su_dev);
         core_delete_hba(hba);