author		Christoph Hellwig <hch@infradead.org>	2012-10-08 00:03:19 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2012-11-06 23:55:43 -0500
commit		0fd97ccf45be26fb01b3a412f1f6c6b5044b2f16
tree		c642e3da11e534a311a1e998ef740a3d44b9187b /drivers
parent		3d70f8c617a436c7146ecb81df2265b4626dfe89
target: kill struct se_subsystem_dev
Simplify the code a lot by killing the superfluous struct se_subsystem_dev.
Instead, se_device is allocated early on by the backend driver, which
allocates it as part of its own per-device structure, borrowing the scheme
that is for example used for inode allocation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
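The allocation scheme the message refers to is the containment pattern the VFS uses for inodes: the backend embeds the generic structure inside its own per-device structure, allocates both in one go, and recovers its private part with container_of(). A minimal sketch of the idea, assuming a hypothetical my_dev backend (the names below are illustrative, not the identifiers introduced by this patch):

/*
 * Hypothetical backend-private structure embedding the generic
 * struct se_device, in the style this commit borrows from inode
 * allocation (cf. ext4_inode_info embedding struct inode).
 */
struct my_dev {
	struct se_device se_dev;	/* generic part, allocated with us */
	void *my_private_state;		/* backend-specific fields follow */
};

/* The backend allocates its own structure early on and hands the
 * embedded generic part back to the core... */
static struct se_device *my_alloc_device(void)
{
	struct my_dev *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;
	return &mdev->se_dev;
}

/* ...and later recovers its private structure from the bare
 * se_device pointer the core passes around. */
static inline struct my_dev *MY_DEV(struct se_device *dev)
{
	return container_of(dev, struct my_dev, se_dev);
}

The container_of() round-trip is what lets the core traffic purely in struct se_device pointers while each backend keeps its private state adjacent in the same allocation.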
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/target/loopback/tcm_loop.h		|    1
-rw-r--r--	drivers/target/target_core_alua.c		|  152
-rw-r--r--	drivers/target/target_core_alua.h		|    4
-rw-r--r--	drivers/target/target_core_configfs.c		|  528
-rw-r--r--	drivers/target/target_core_device.c		|  613
-rw-r--r--	drivers/target/target_core_fabric_configfs.c	|   14
-rw-r--r--	drivers/target/target_core_file.c		|  138
-rw-r--r--	drivers/target/target_core_file.h		|    2
-rw-r--r--	drivers/target/target_core_hba.c		|    4
-rw-r--r--	drivers/target/target_core_iblock.c		|  185
-rw-r--r--	drivers/target/target_core_iblock.h		|    1
-rw-r--r--	drivers/target/target_core_internal.h		|   12
-rw-r--r--	drivers/target/target_core_pr.c			|  148
-rw-r--r--	drivers/target/target_core_pr.h			|    2
-rw-r--r--	drivers/target/target_core_pscsi.c		|  270
-rw-r--r--	drivers/target/target_core_pscsi.h		|    2
-rw-r--r--	drivers/target/target_core_rd.c			|   94
-rw-r--r--	drivers/target/target_core_rd.h			|    1
-rw-r--r--	drivers/target/target_core_sbc.c		|   31
-rw-r--r--	drivers/target/target_core_spc.c		|   91
-rw-r--r--	drivers/target/target_core_stat.c		|  307
-rw-r--r--	drivers/target/target_core_tmr.c		|    6
-rw-r--r--	drivers/target/target_core_transport.c		|  217
-rw-r--r--	drivers/target/target_core_ua.c			|    6
24 files changed, 1011 insertions(+), 1818 deletions(-)
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7b54893db665..dd7a84ee78e1 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -53,7 +53,6 @@ struct tcm_loop_hba {
 	struct se_hba_s *se_hba;
 	struct se_lun *tl_hba_lun;
 	struct se_port *tl_hba_lun_sep;
-	struct se_device_s *se_dev_hba_ptr;
 	struct tcm_loop_nexus *tl_nexus;
 	struct device dev;
 	struct Scsi_Host *sh;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 9a5f9a7aecd2..15c127b780d8 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -61,7 +61,7 @@ struct t10_alua_lu_gp *default_lu_gp;
  */
 int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
@@ -86,8 +86,8 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 	}
 	buf = transport_kmap_data_sg(cmd);
 
-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		/*
 		 * Check if the Target port group and Target port descriptor list
@@ -160,7 +160,7 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 		}
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	}
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 	/*
 	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 	 */
@@ -203,7 +203,6 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
@@ -303,9 +302,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		 * Locate the matching target port group ID from
 		 * the global tg_pt_gp list
 		 */
-		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 		list_for_each_entry(tg_pt_gp,
-				&su_dev->t10_alua.tg_pt_gps_list,
+				&dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {
 			if (!tg_pt_gp->tg_pt_gp_valid_id)
 				continue;
@@ -315,18 +314,18 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 			rc = core_alua_do_port_transition(tg_pt_gp,
 					dev, l_port, nacl,
 					alua_access_state, 1);
 
-			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
 			break;
 		}
-		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 		/*
 		 * If not matching target port group ID can be located
 		 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
@@ -758,8 +757,7 @@ static int core_alua_update_tpg_primary_metadata(
 	int primary_state,
 	unsigned char *md_buf)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
-	struct t10_wwn *wwn = &su_dev->t10_wwn;
+	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
 	char path[ALUA_METADATA_PATH_LEN];
 	int len;
 
@@ -899,7 +897,6 @@ int core_alua_do_port_transition(
 {
 	struct se_device *dev;
 	struct se_port *port;
-	struct se_subsystem_dev *su_dev;
 	struct se_node_acl *nacl;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
@@ -949,14 +946,13 @@ int core_alua_do_port_transition(
 				lu_gp_mem_list) {
 
 		dev = lu_gp_mem->lu_gp_mem_dev;
-		su_dev = dev->se_sub_dev;
 		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
 		smp_mb__after_atomic_inc();
 		spin_unlock(&lu_gp->lu_gp_lock);
 
-		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 		list_for_each_entry(tg_pt_gp,
-				&su_dev->t10_alua.tg_pt_gps_list,
+				&dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {
 
 			if (!tg_pt_gp->tg_pt_gp_valid_id)
@@ -981,7 +977,7 @@ int core_alua_do_port_transition(
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
 			 * success.
@@ -989,11 +985,11 @@ int core_alua_do_port_transition(
 			core_alua_do_transition_tg_pt(tg_pt_gp, port,
 					nacl, md_buf, new_state, explict);
 
-			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
 		}
-		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 		spin_lock(&lu_gp->lu_gp_lock);
 		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -1268,8 +1264,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 
 void core_alua_free_lu_gp_mem(struct se_device *dev)
 {
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua *alua = &dev->t10_alua;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 
@@ -1358,10 +1353,8 @@ void __core_alua_drop_lu_gp_mem(
 	spin_unlock(&lu_gp->lu_gp_lock);
 }
 
-struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
-	struct se_subsystem_dev *su_dev,
-	const char *name,
-	int def_group)
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
+		const char *name, int def_group)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 
@@ -1375,7 +1368,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+	tg_pt_gp->tg_pt_gp_dev = dev;
 	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
@@ -1392,14 +1385,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
 
 	if (def_group) {
-		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 		tg_pt_gp->tg_pt_gp_id =
-				su_dev->t10_alua.alua_tg_pt_gps_counter++;
+				dev->t10_alua.alua_tg_pt_gps_counter++;
 		tg_pt_gp->tg_pt_gp_valid_id = 1;
-		su_dev->t10_alua.alua_tg_pt_gps_count++;
+		dev->t10_alua.alua_tg_pt_gps_count++;
 		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-			      &su_dev->t10_alua.tg_pt_gps_list);
-		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			      &dev->t10_alua.tg_pt_gps_list);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 	}
 
 	return tg_pt_gp;
@@ -1409,9 +1402,10 @@ int core_alua_set_tg_pt_gp_id(
 	struct t10_alua_tg_pt_gp *tg_pt_gp,
 	u16 tg_pt_gp_id)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
 	u16 tg_pt_gp_id_tmp;
+
 	/*
 	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
 	 */
@@ -1421,19 +1415,19 @@ int core_alua_set_tg_pt_gp_id(
 		return -EINVAL;
 	}
 
-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
 		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
 			" 0x0000ffff reached\n");
-		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
 		return -ENOSPC;
 	}
 again:
 	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
-			su_dev->t10_alua.alua_tg_pt_gps_counter++;
+			dev->t10_alua.alua_tg_pt_gps_counter++;
 
-	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
+	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
 			if (!tg_pt_gp_id)
@@ -1441,7 +1435,7 @@ again:
 
 			pr_err("ALUA Target Port Group ID: %hu already"
 				" exists, ignoring request\n", tg_pt_gp_id);
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			return -EINVAL;
 		}
 	}
@@ -1449,9 +1443,9 @@ again:
 	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
 	tg_pt_gp->tg_pt_gp_valid_id = 1;
 	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-			&su_dev->t10_alua.tg_pt_gps_list);
-	su_dev->t10_alua.alua_tg_pt_gps_count++;
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			&dev->t10_alua.tg_pt_gps_list);
+	dev->t10_alua.alua_tg_pt_gps_count++;
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	return 0;
 }
@@ -1480,8 +1474,9 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 void core_alua_free_tg_pt_gp(
 	struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+
 	/*
 	 * Once we have reached this point, config_item_put() has already
 	 * been called from target_core_alua_drop_tg_pt_gp().
@@ -1490,10 +1485,11 @@ void core_alua_free_tg_pt_gp(
 	 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
 	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
 	 */
-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	list_del(&tg_pt_gp->tg_pt_gp_list);
-	su_dev->t10_alua.alua_tg_pt_gps_counter--;
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	dev->t10_alua.alua_tg_pt_gps_counter--;
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
 	 * core_alua_get_tg_pt_gp_by_name() in
@@ -1502,6 +1498,7 @@ void core_alua_free_tg_pt_gp(
 	 */
 	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
 		cpu_relax();
+
 	/*
 	 * Release reference to struct t10_alua_tg_pt_gp from all associated
 	 * struct se_port.
@@ -1525,9 +1522,9 @@ void core_alua_free_tg_pt_gp(
 		 * default_tg_pt_gp.
 		 */
 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
+		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
 			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-					su_dev->t10_alua.default_tg_pt_gp);
+					dev->t10_alua.default_tg_pt_gp);
 		} else
 			tg_pt_gp_mem->tg_pt_gp = NULL;
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1541,8 +1538,7 @@ void core_alua_free_tg_pt_gp(
 
 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 {
-	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
-	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua *alua = &port->sep_lun->lun_se_dev->t10_alua;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 
@@ -1574,25 +1570,24 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 }
 
 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
-	struct se_subsystem_dev *su_dev,
-	const char *name)
+		struct se_device *dev, const char *name)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct config_item *ci;
 
-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		if (!tg_pt_gp->tg_pt_gp_valid_id)
 			continue;
 		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
 		if (!strcmp(config_item_name(ci), name)) {
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			return tg_pt_gp;
 		}
 	}
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	return NULL;
 }
@@ -1600,11 +1595,11 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
 static void core_alua_put_tg_pt_gp_from_name(
 	struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 
-	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 }
 
 /*
@@ -1640,9 +1635,8 @@ static void __core_alua_drop_tg_pt_gp_mem(
 
 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
 {
-	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
 	struct config_item *tg_pt_ci;
-	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua *alua = &port->sep_lun->lun_se_dev->t10_alua;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	ssize_t len = 0;
@@ -1683,7 +1677,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 {
 	struct se_portal_group *tpg;
 	struct se_lun *lun;
-	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct se_device *dev = port->sep_lun->lun_se_dev;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	unsigned char buf[TG_PT_GROUP_NAME_BUF];
@@ -1692,7 +1686,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	tpg = port->sep_tpg;
 	lun = port->sep_lun;
 
-	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+	if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
 		pr_warn("SPC3_ALUA_EMULATED not enabled for"
 			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
 			tpg->se_tpg_tfo->tpg_get_tag(tpg),
@@ -1716,7 +1710,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	 * struct t10_alua_tg_pt_gp. This reference is released with
 	 * core_alua_put_tg_pt_gp_from_name() below.
 	 */
-	tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+	tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
 				strstrip(buf));
 	if (!tg_pt_gp_new)
 		return -ENODEV;
@@ -1750,7 +1744,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 
 		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
 		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-				su_dev->t10_alua.default_tg_pt_gp);
+				dev->t10_alua.default_tg_pt_gp);
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
 		return count;
@@ -2054,32 +2048,29 @@ ssize_t core_alua_store_secondary_write_metadata(
 	return count;
 }
 
-int core_setup_alua(struct se_device *dev, int force_pt)
+int core_setup_alua(struct se_device *dev)
 {
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua *alua = &dev->t10_alua;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
+
 	/*
 	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
 	 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
 	 * cause a problem because libata and some SATA RAID HBAs appear
 	 * under Linux/SCSI, but emulate SCSI logic themselves.
 	 */
-	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
-	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
-		alua->alua_type = SPC_ALUA_PASSTHROUGH;
-		alua->alua_state_check = &core_alua_state_check_nop;
+	if ((dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) ||
+	    (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV &&
+	     !dev->dev_attrib.emulate_alua)) {
 		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
 			" emulation\n", dev->transport->name);
-		return 0;
-	}
-	/*
-	 * If SPC-3 or above is reported by real or emulated struct se_device,
-	 * use emulated ALUA.
-	 */
-	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
+
+		alua->alua_type = SPC_ALUA_PASSTHROUGH;
+		alua->alua_state_check = &core_alua_state_check_nop;
+	} else if (dev->transport->get_device_rev(dev) >= SCSI_3) {
 		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
 			" device\n", dev->transport->name);
+
 		/*
 		 * Associate this struct se_device with the default ALUA
 		 * LUN Group.
@@ -2099,10 +2090,11 @@ int core_setup_alua(struct se_device *dev, int force_pt)
2099 " core/alua/lu_gps/default_lu_gp\n", 2090 " core/alua/lu_gps/default_lu_gp\n",
2100 dev->transport->name); 2091 dev->transport->name);
2101 } else { 2092 } else {
2102 alua->alua_type = SPC2_ALUA_DISABLED;
2103 alua->alua_state_check = &core_alua_state_check_nop;
2104 pr_debug("%s: Disabling ALUA Emulation for SPC-2" 2093 pr_debug("%s: Disabling ALUA Emulation for SPC-2"
2105 " device\n", dev->transport->name); 2094 " device\n", dev->transport->name);
2095
2096 alua->alua_type = SPC2_ALUA_DISABLED;
2097 alua->alua_state_check = &core_alua_state_check_nop;
2106 } 2098 }
2107 2099
2108 return 0; 2100 return 0;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index f920c170d47b..5019157ffe69 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
 				struct t10_alua_lu_gp *);
 extern void core_alua_drop_lu_gp_dev(struct se_device *);
 extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
-			struct se_subsystem_dev *, const char *, int);
+			struct se_device *, const char *, int);
 extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
 extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 			struct se_port *);
@@ -131,6 +131,6 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
 					char *);
 extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
 					const char *, size_t);
-extern int core_setup_alua(struct se_device *, int);
+extern int core_setup_alua(struct se_device *);
 
 #endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c123327499a3..7272016ed05f 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -565,21 +565,8 @@ static ssize_t target_core_dev_show_attr_##_name( \
 	struct se_dev_attrib *da,					\
 	char *page)							\
 {									\
-	struct se_device *dev;						\
-	struct se_subsystem_dev *se_dev = da->da_sub_dev;		\
-	ssize_t rb;							\
-									\
-	spin_lock(&se_dev->se_dev_lock);				\
-	dev = se_dev->se_dev_ptr;					\
-	if (!dev) {							\
-		spin_unlock(&se_dev->se_dev_lock);			\
-		return -ENODEV;						\
-	}								\
-	rb = snprintf(page, PAGE_SIZE, "%u\n",				\
-		(u32)dev->se_sub_dev->se_dev_attrib._name);		\
-	spin_unlock(&se_dev->se_dev_lock);				\
-									\
-	return rb;							\
+	return snprintf(page, PAGE_SIZE, "%u\n",			\
+		(u32)da->da_dev->dev_attrib._name);			\
 }
 
 #define DEF_DEV_ATTRIB_STORE(_name)					\
@@ -588,26 +575,16 @@ static ssize_t target_core_dev_store_attr_##_name( \
 	const char *page,						\
 	size_t count)							\
 {									\
-	struct se_device *dev;						\
-	struct se_subsystem_dev *se_dev = da->da_sub_dev;		\
 	unsigned long val;						\
 	int ret;							\
 									\
-	spin_lock(&se_dev->se_dev_lock);				\
-	dev = se_dev->se_dev_ptr;					\
-	if (!dev) {							\
-		spin_unlock(&se_dev->se_dev_lock);			\
-		return -ENODEV;						\
-	}								\
 	ret = strict_strtoul(page, 0, &val);				\
 	if (ret < 0) {							\
-		spin_unlock(&se_dev->se_dev_lock);			\
 		pr_err("strict_strtoul() failed with"			\
 			" ret: %d\n", ret);				\
 		return -EINVAL;						\
 	}								\
-	ret = se_dev_set_##_name(dev, (u32)val);			\
-	spin_unlock(&se_dev->se_dev_lock);				\
+	ret = se_dev_set_##_name(da->da_dev, (u32)val);			\
 									\
 	return (!ret) ? count : -EINVAL;				\
 }
@@ -764,13 +741,6 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
 	struct t10_wwn *t10_wwn,
 	char *page)
 {
-	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
-	struct se_device *dev;
-
-	dev = se_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
 	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
 		&t10_wwn->unit_serial[0]);
 }
@@ -780,8 +750,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
-	struct se_device *dev;
+	struct se_device *dev = t10_wwn->t10_dev;
 	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
 
 	/*
@@ -794,7 +763,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
 	 * it is doing 'the right thing' wrt a world wide unique
 	 * VPD Unit Serial Number that OS dependent multipath can depend on.
 	 */
-	if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
 		pr_err("Underlying SCSI device firmware provided VPD"
 			" Unit Serial, ignoring request\n");
 		return -EOPNOTSUPP;
@@ -811,15 +780,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
 	 * (underneath the initiator side OS dependent multipath code)
 	 * could cause negative effects.
 	 */
-	dev = su_dev->se_dev_ptr;
-	if (dev) {
-		if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-			pr_err("Unable to set VPD Unit Serial while"
-				" active %d $FABRIC_MOD exports exist\n",
-				atomic_read(&dev->dev_export_obj.obj_access_count));
-			return -EINVAL;
-		}
+	if (dev->export_count) {
+		pr_err("Unable to set VPD Unit Serial while"
+			" active %d $FABRIC_MOD exports exist\n",
+			dev->export_count);
+		return -EINVAL;
 	}
+
 	/*
 	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
 	 *
@@ -828,12 +795,12 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
 	 */
 	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
 	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
-	snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
 			"%s", strstrip(buf));
-	su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
 
 	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
-			" %s\n", su_dev->t10_wwn.unit_serial);
+			" %s\n", dev->t10_wwn.unit_serial);
 
 	return count;
 }
@@ -847,16 +814,10 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
 	struct t10_wwn *t10_wwn,
 	char *page)
 {
-	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
-	struct se_device *dev;
 	struct t10_vpd *vpd;
 	unsigned char buf[VPD_TMP_BUF_SIZE];
 	ssize_t len = 0;
 
-	dev = se_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
 	memset(buf, 0, VPD_TMP_BUF_SIZE);
 
 	spin_lock(&t10_wwn->t10_vpd_lock);
@@ -894,16 +855,10 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
 	struct t10_wwn *t10_wwn,					\
 	char *page)							\
 {									\
-	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;	\
-	struct se_device *dev;						\
 	struct t10_vpd *vpd;						\
 	unsigned char buf[VPD_TMP_BUF_SIZE];				\
 	ssize_t len = 0;						\
 									\
-	dev = se_dev->se_dev_ptr;					\
-	if (!dev)							\
-		return -ENODEV;						\
-									\
 	spin_lock(&t10_wwn->t10_vpd_lock);				\
 	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
 		if (vpd->association != _assoc)				\
@@ -1003,7 +958,7 @@ static struct config_item_type target_core_dev_wwn_cit = {
 
 /* Start functions for struct config_item_type target_core_dev_pr_cit */
 
-CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
 #define SE_DEV_PR_ATTR(_name, _mode)					\
 static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
 	__CONFIGFS_EATTR(_name, _mode,					\
@@ -1071,23 +1026,17 @@ static ssize_t target_core_dev_pr_show_spc2_res(
 	return *len;
 }
 
-static ssize_t target_core_dev_pr_show_attr_res_holder(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
+		char *page)
 {
 	ssize_t len = 0;
 
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	switch (su_dev->t10_pr.res_type) {
+	switch (dev->t10_pr.res_type) {
 	case SPC3_PERSISTENT_RESERVATIONS:
-		target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
-				page, &len);
+		target_core_dev_pr_show_spc3_res(dev, page, &len);
 		break;
 	case SPC2_RESERVATIONS:
-		target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
-				page, &len);
+		target_core_dev_pr_show_spc2_res(dev, page, &len);
 		break;
 	case SPC_PASSTHROUGH:
 		len += sprintf(page+len, "Passthrough\n");
@@ -1102,22 +1051,13 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(
 
 SE_DEV_PR_ATTR_RO(res_holder);
 
-/*
- * res_pr_all_tgt_pts
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	struct se_device *dev;
 	struct t10_pr_registration *pr_reg;
 	ssize_t len = 0;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return len;
 
 	spin_lock(&dev->dev_reservation_lock);
@@ -1144,20 +1084,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
 
 SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
 
-/*
- * res_pr_generation
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
 
-	return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
+	return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
 }
 
 SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1166,10 +1099,8 @@ SE_DEV_PR_ATTR_RO(res_pr_generation);
  * res_pr_holder_tg_port
  */
 static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	struct se_device *dev;
 	struct se_node_acl *se_nacl;
 	struct se_lun *lun;
 	struct se_portal_group *se_tpg;
@@ -1177,11 +1108,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
 	struct target_core_fabric_ops *tfo;
 	ssize_t len = 0;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return len;
 
 	spin_lock(&dev->dev_reservation_lock);
@@ -1211,12 +1138,8 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
 
 SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
 
-/*
- * res_pr_registered_i_pts
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
 	struct target_core_fabric_ops *tfo;
 	struct t10_pr_registration *pr_reg;
@@ -1225,16 +1148,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
 	ssize_t len = 0;
 	int reg_count = 0, prf_isid;
 
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return len;
 
 	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
 
-	spin_lock(&su_dev->t10_pr.registration_lock);
-	list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+	spin_lock(&dev->t10_pr.registration_lock);
+	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
 			pr_reg_list) {
 
 		memset(buf, 0, 384);
@@ -1254,7 +1174,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
 		len += sprintf(page+len, "%s", buf);
 		reg_count++;
 	}
-	spin_unlock(&su_dev->t10_pr.registration_lock);
+	spin_unlock(&dev->t10_pr.registration_lock);
 
 	if (!reg_count)
 		len += sprintf(page+len, "None\n");
@@ -1264,22 +1184,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
 
 SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
 
-/*
- * res_pr_type
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_type(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	struct se_device *dev;
 	struct t10_pr_registration *pr_reg;
 	ssize_t len = 0;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return len;
 
 	spin_lock(&dev->dev_reservation_lock);
@@ -1298,19 +1209,12 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type(
 
 SE_DEV_PR_ATTR_RO(res_pr_type);
 
-/*
- * res_type
- */
 static ssize_t target_core_dev_pr_show_attr_res_type(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
 	ssize_t len = 0;
 
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	switch (su_dev->t10_pr.res_type) {
+	switch (dev->t10_pr.res_type) {
 	case SPC3_PERSISTENT_RESERVATIONS:
 		len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
 		break;
@@ -1330,22 +1234,14 @@ static ssize_t target_core_dev_pr_show_attr_res_type(
 
 SE_DEV_PR_ATTR_RO(res_type);
 
-/*
- * res_aptpl_active
- */
-
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
 
 	return sprintf(page, "APTPL Bit Status: %s\n",
-		(su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
+		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
 }
 
 SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1354,13 +1250,9 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
  * res_aptpl_metadata
  */
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
 
 	return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1392,11 +1284,10 @@ static match_table_t tokens = {
 };
 
 static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
-	struct se_subsystem_dev *su_dev,
+	struct se_device *dev,
 	const char *page,
 	size_t count)
 {
-	struct se_device *dev;
 	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
 	unsigned char *t_fabric = NULL, *t_port = NULL;
 	char *orig, *ptr, *arg_p, *opts;
@@ -1408,14 +1299,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 	u16 port_rpti = 0, tpgt = 0;
 	u8 type = 0, scope;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
 
-	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+	if (dev->export_count) {
 		pr_debug("Unable to process APTPL metadata while"
 			" active fabric exports exist\n");
 		return -EINVAL;
@@ -1558,7 +1445,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 		goto out;
 	}
 
-	ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
+	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
 			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
 			res_holder, all_tg_pt, type);
 out:
@@ -1573,7 +1460,7 @@ out:
 
 SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
 
-CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);
 
 static struct configfs_attribute *target_core_dev_pr_attrs[] = {
 	&target_core_dev_pr_res_holder.attr,
@@ -1605,18 +1492,14 @@ static struct config_item_type target_core_dev_pr_cit = {
 
 static ssize_t target_core_show_dev_info(void *p, char *page)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_hba *hba = se_dev->se_dev_hba;
-	struct se_subsystem_api *t = hba->transport;
+	struct se_device *dev = p;
+	struct se_subsystem_api *t = dev->transport;
 	int bl = 0;
 	ssize_t read_bytes = 0;
 
-	if (!se_dev->se_dev_ptr)
-		return -ENODEV;
-
-	transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+	transport_dump_dev_state(dev, page, &bl);
 	read_bytes += bl;
-	read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+	read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
 	return read_bytes;
 }
 
@@ -1633,17 +1516,10 @@ static ssize_t target_core_store_dev_control(
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_hba *hba = se_dev->se_dev_hba;
-	struct se_subsystem_api *t = hba->transport;
+	struct se_device *dev = p;
+	struct se_subsystem_api *t = dev->transport;
 
-	if (!se_dev->se_dev_su_ptr) {
-		pr_err("Unable to locate struct se_subsystem_dev>se"
-			"_dev_su_ptr\n");
-		return -EINVAL;
-	}
-
-	return t->set_configfs_dev_params(hba, se_dev, page, count);
+	return t->set_configfs_dev_params(dev, page, count);
 }
 
 static struct target_core_configfs_attribute target_core_attr_dev_control = {
@@ -1656,12 +1532,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
 
 static ssize_t target_core_show_dev_alias(void *p, char *page)
 {
-	struct se_subsystem_dev *se_dev = p;
+	struct se_device *dev = p;
 
-	if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+	if (!(dev->dev_flags & DF_USING_ALIAS))
 		return 0;
 
-	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
 }
 
 static ssize_t target_core_store_dev_alias(
@@ -1669,8 +1545,8 @@ static ssize_t target_core_store_dev_alias(
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_device *dev = p;
+	struct se_hba *hba = dev->se_hba;
 	ssize_t read_bytes;
 
 	if (count > (SE_DEV_ALIAS_LEN-1)) {
@@ -1680,19 +1556,18 @@ static ssize_t target_core_store_dev_alias(
 		return -EINVAL;
 	}
 
-	read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
-			"%s", page);
+	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
 	if (!read_bytes)
 		return -EINVAL;
-	if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
-		se_dev->se_dev_alias[read_bytes - 1] = '\0';
+	if (dev->dev_alias[read_bytes - 1] == '\n')
+		dev->dev_alias[read_bytes - 1] = '\0';
 
-	se_dev->su_dev_flags |= SDF_USING_ALIAS;
+	dev->dev_flags |= DF_USING_ALIAS;
 
 	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
 		config_item_name(&hba->hba_group.cg_item),
-		config_item_name(&se_dev->se_dev_group.cg_item),
-		se_dev->se_dev_alias);
+		config_item_name(&dev->dev_group.cg_item),
+		dev->dev_alias);
 
 	return read_bytes;
 }
@@ -1707,12 +1582,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
 
 static ssize_t target_core_show_dev_udev_path(void *p, char *page)
 {
-	struct se_subsystem_dev *se_dev = p;
+	struct se_device *dev = p;
 
-	if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
 		return 0;
 
-	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
 }
 
 static ssize_t target_core_store_dev_udev_path(
@@ -1720,8 +1595,8 @@ static ssize_t target_core_store_dev_udev_path(
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_device *dev = p;
+	struct se_hba *hba = dev->se_hba;
 	ssize_t read_bytes;
 
 	if (count > (SE_UDEV_PATH_LEN-1)) {
@@ -1731,19 +1606,19 @@ static ssize_t target_core_store_dev_udev_path(
 		return -EINVAL;
 	}
 
-	read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
 			"%s", page);
 	if (!read_bytes)
 		return -EINVAL;
-	if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
-		se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+	if (dev->udev_path[read_bytes - 1] == '\n')
+		dev->udev_path[read_bytes - 1] = '\0';
 
-	se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+	dev->dev_flags |= DF_USING_UDEV_PATH;
 
 	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
 		config_item_name(&hba->hba_group.cg_item),
-		config_item_name(&se_dev->se_dev_group.cg_item),
-		se_dev->se_dev_udev_path);
+		config_item_name(&dev->dev_group.cg_item),
+		dev->udev_path);
 
 	return read_bytes;
 }
@@ -1761,11 +1636,9 @@ static ssize_t target_core_store_dev_enable(
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_device *dev;
-	struct se_hba *hba = se_dev->se_dev_hba;
-	struct se_subsystem_api *t = hba->transport;
+	struct se_device *dev = p;
 	char *ptr;
+	int ret;
 
 	ptr = strstr(page, "1");
 	if (!ptr) {
@@ -1773,25 +1646,10 @@ static ssize_t target_core_store_dev_enable(
1773 " is \"1\"\n"); 1646 " is \"1\"\n");
1774 return -EINVAL; 1647 return -EINVAL;
1775 } 1648 }
1776 if (se_dev->se_dev_ptr) {
1777 pr_err("se_dev->se_dev_ptr already set for storage"
1778 " object\n");
1779 return -EEXIST;
1780 }
1781
1782 if (t->check_configfs_dev_params(hba, se_dev) < 0)
1783 return -EINVAL;
1784
1785 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1786 if (IS_ERR(dev))
1787 return PTR_ERR(dev);
1788 else if (!dev)
1789 return -EINVAL;
1790
1791 se_dev->se_dev_ptr = dev;
1792 pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
1793 " %p\n", se_dev->se_dev_ptr);
1794 1649
1650 ret = target_configure_device(dev);
1651 if (ret)
1652 return ret;
1795 return count; 1653 return count;
1796} 1654}
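The -EEXIST guard on se_dev_ptr removed above does not vanish: it becomes the DF_CONFIGURED test inside target_configure_device() later in this patch. A user-space sketch of that one-shot transition; the flag value is assumed for illustration:

    #include <errno.h>
    #include <stdio.h>

    #define DF_CONFIGURED 0x2       /* assumed flag value, for illustration */

    struct dev { unsigned int dev_flags; };

    /* A second "echo 1 > enable" must fail, not configure the backend twice. */
    static int configure(struct dev *d)
    {
            if (d->dev_flags & DF_CONFIGURED)
                    return -EEXIST;
            /* backend ->configure_device() would run here */
            d->dev_flags |= DF_CONFIGURED;
            return 0;
    }

    int main(void)
    {
            struct dev d = { 0 };
            int first = configure(&d);
            int second = configure(&d);

            printf("first=%d second=%d\n", first, second);  /* 0, then -EEXIST */
            return 0;
    }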
1797 1655
@@ -1805,18 +1663,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1805 1663
1806static ssize_t target_core_show_alua_lu_gp(void *p, char *page) 1664static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1807{ 1665{
1808 struct se_device *dev; 1666 struct se_device *dev = p;
1809 struct se_subsystem_dev *su_dev = p;
1810 struct config_item *lu_ci; 1667 struct config_item *lu_ci;
1811 struct t10_alua_lu_gp *lu_gp; 1668 struct t10_alua_lu_gp *lu_gp;
1812 struct t10_alua_lu_gp_member *lu_gp_mem; 1669 struct t10_alua_lu_gp_member *lu_gp_mem;
1813 ssize_t len = 0; 1670 ssize_t len = 0;
1814 1671
1815 dev = su_dev->se_dev_ptr; 1672 if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
1816 if (!dev)
1817 return -ENODEV;
1818
1819 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
1820 return len; 1673 return len;
1821 1674
1822 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1675 lu_gp_mem = dev->dev_alua_lu_gp_mem;
@@ -1843,22 +1696,17 @@ static ssize_t target_core_store_alua_lu_gp(
1843 const char *page, 1696 const char *page,
1844 size_t count) 1697 size_t count)
1845{ 1698{
1846 struct se_device *dev; 1699 struct se_device *dev = p;
1847 struct se_subsystem_dev *su_dev = p; 1700 struct se_hba *hba = dev->se_hba;
1848 struct se_hba *hba = su_dev->se_dev_hba;
1849 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; 1701 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
1850 struct t10_alua_lu_gp_member *lu_gp_mem; 1702 struct t10_alua_lu_gp_member *lu_gp_mem;
1851 unsigned char buf[LU_GROUP_NAME_BUF]; 1703 unsigned char buf[LU_GROUP_NAME_BUF];
1852 int move = 0; 1704 int move = 0;
1853 1705
1854 dev = su_dev->se_dev_ptr; 1706 if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1855 if (!dev)
1856 return -ENODEV;
1857
1858 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1859 pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n", 1707 pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
1860 config_item_name(&hba->hba_group.cg_item), 1708 config_item_name(&hba->hba_group.cg_item),
1861 config_item_name(&su_dev->se_dev_group.cg_item)); 1709 config_item_name(&dev->dev_group.cg_item));
1862 return -EINVAL; 1710 return -EINVAL;
1863 } 1711 }
1864 if (count > LU_GROUP_NAME_BUF) { 1712 if (count > LU_GROUP_NAME_BUF) {
@@ -1902,7 +1750,7 @@ static ssize_t target_core_store_alua_lu_gp(
1902 " from ALUA LU Group: core/alua/lu_gps/%s, ID:" 1750 " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
1903 " %hu\n", 1751 " %hu\n",
1904 config_item_name(&hba->hba_group.cg_item), 1752 config_item_name(&hba->hba_group.cg_item),
1905 config_item_name(&su_dev->se_dev_group.cg_item), 1753 config_item_name(&dev->dev_group.cg_item),
1906 config_item_name(&lu_gp->lu_gp_group.cg_item), 1754 config_item_name(&lu_gp->lu_gp_group.cg_item),
1907 lu_gp->lu_gp_id); 1755 lu_gp->lu_gp_id);
1908 1756
@@ -1927,7 +1775,7 @@ static ssize_t target_core_store_alua_lu_gp(
1927 " core/alua/lu_gps/%s, ID: %hu\n", 1775 " core/alua/lu_gps/%s, ID: %hu\n",
1928 (move) ? "Moving" : "Adding", 1776 (move) ? "Moving" : "Adding",
1929 config_item_name(&hba->hba_group.cg_item), 1777 config_item_name(&hba->hba_group.cg_item),
1930 config_item_name(&su_dev->se_dev_group.cg_item), 1778 config_item_name(&dev->dev_group.cg_item),
1931 config_item_name(&lu_gp_new->lu_gp_group.cg_item), 1779 config_item_name(&lu_gp_new->lu_gp_group.cg_item),
1932 lu_gp_new->lu_gp_id); 1780 lu_gp_new->lu_gp_id);
1933 1781
@@ -1955,69 +1803,44 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
1955 1803
1956static void target_core_dev_release(struct config_item *item) 1804static void target_core_dev_release(struct config_item *item)
1957{ 1805{
1958 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 1806 struct config_group *dev_cg = to_config_group(item);
1959 struct se_subsystem_dev, se_dev_group); 1807 struct se_device *dev =
1960 struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 1808 container_of(dev_cg, struct se_device, dev_group);
1961 struct se_subsystem_api *t = hba->transport;
1962 struct config_group *dev_cg = &se_dev->se_dev_group;
1963 1809
1964 kfree(dev_cg->default_groups); 1810 kfree(dev_cg->default_groups);
1965 /* 1811 target_free_device(dev);
1966 * This pointer will set when the storage is enabled with:
1967 *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
1968 */
1969 if (se_dev->se_dev_ptr) {
1970 pr_debug("Target_Core_ConfigFS: Calling se_free_"
1971 "virtual_device() for se_dev_ptr: %p\n",
1972 se_dev->se_dev_ptr);
1973
1974 se_free_virtual_device(se_dev->se_dev_ptr, hba);
1975 } else {
1976 /*
1977 * Release struct se_subsystem_dev->se_dev_su_ptr..
1978 */
1979 pr_debug("Target_Core_ConfigFS: Calling t->free_"
1980 "device() for se_dev_su_ptr: %p\n",
1981 se_dev->se_dev_su_ptr);
1982
1983 t->free_device(se_dev->se_dev_su_ptr);
1984 }
1985
1986 pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
1987 "_dev_t: %p\n", se_dev);
1988 kfree(se_dev);
1989} 1812}
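With dev_group embedded directly in se_device, the release path recovers the device with a single container_of() and no se_subsystem_dev indirection. A compilable user-space sketch of that recovery (types abbreviated):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct config_group { const char *name; };

    /* dev_group is embedded in se_device, so a group pointer alone is
     * enough to get the owning device back. */
    struct se_device {
            int id;
            struct config_group dev_group;
    };

    int main(void)
    {
            struct se_device dev = { 7, { "mydev" } };
            struct config_group *cg = &dev.dev_group;
            struct se_device *back = container_of(cg, struct se_device, dev_group);

            printf("%d %s\n", back->id, back->dev_group.name);  /* 7 mydev */
            return 0;
    }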
1990 1813
1991static ssize_t target_core_dev_show(struct config_item *item, 1814static ssize_t target_core_dev_show(struct config_item *item,
1992 struct configfs_attribute *attr, 1815 struct configfs_attribute *attr,
1993 char *page) 1816 char *page)
1994{ 1817{
1995 struct se_subsystem_dev *se_dev = container_of( 1818 struct config_group *dev_cg = to_config_group(item);
1996 to_config_group(item), struct se_subsystem_dev, 1819 struct se_device *dev =
1997 se_dev_group); 1820 container_of(dev_cg, struct se_device, dev_group);
1998 struct target_core_configfs_attribute *tc_attr = container_of( 1821 struct target_core_configfs_attribute *tc_attr = container_of(
1999 attr, struct target_core_configfs_attribute, attr); 1822 attr, struct target_core_configfs_attribute, attr);
2000 1823
2001 if (!tc_attr->show) 1824 if (!tc_attr->show)
2002 return -EINVAL; 1825 return -EINVAL;
2003 1826
2004 return tc_attr->show(se_dev, page); 1827 return tc_attr->show(dev, page);
2005} 1828}
2006 1829
2007static ssize_t target_core_dev_store(struct config_item *item, 1830static ssize_t target_core_dev_store(struct config_item *item,
2008 struct configfs_attribute *attr, 1831 struct configfs_attribute *attr,
2009 const char *page, size_t count) 1832 const char *page, size_t count)
2010{ 1833{
2011 struct se_subsystem_dev *se_dev = container_of( 1834 struct config_group *dev_cg = to_config_group(item);
2012 to_config_group(item), struct se_subsystem_dev, 1835 struct se_device *dev =
2013 se_dev_group); 1836 container_of(dev_cg, struct se_device, dev_group);
2014 struct target_core_configfs_attribute *tc_attr = container_of( 1837 struct target_core_configfs_attribute *tc_attr = container_of(
2015 attr, struct target_core_configfs_attribute, attr); 1838 attr, struct target_core_configfs_attribute, attr);
2016 1839
2017 if (!tc_attr->store) 1840 if (!tc_attr->store)
2018 return -EINVAL; 1841 return -EINVAL;
2019 1842
2020 return tc_attr->store(se_dev, page, count); 1843 return tc_attr->store(dev, page, count);
2021} 1844}
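Both wrappers above apply container_of() twice: once on the item to reach the device, once on the generic configfs_attribute to reach the wrapper holding the typed show/store callbacks. A user-space sketch of the attribute-side dispatch (types abbreviated):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct configfs_attribute { const char *ca_name; };

    /* Wrapper pairing the generic attribute with typed callbacks, as
     * target_core_configfs_attribute does. */
    struct tc_attr {
            struct configfs_attribute attr;
            int (*show)(void *dev, char *page);
    };

    static int show_alias(void *dev, char *page)
    {
            return sprintf(page, "alias-of-%s\n", (char *)dev);
    }

    static struct tc_attr alias_attr = {
            { "alias" },
            show_alias,
    };

    /* Given only the embedded attribute, recover the wrapper and call
     * its callback -- the shape of target_core_dev_show(). */
    static int dev_show(void *dev, struct configfs_attribute *attr, char *page)
    {
            struct tc_attr *tc = container_of(attr, struct tc_attr, attr);

            return tc->show ? tc->show(dev, page) : -1;
    }

    int main(void)
    {
            char name[] = "dev0";
            char page[64];

            if (dev_show(name, &alias_attr.attr, page) > 0)
                    fputs(page, stdout);            /* alias-of-dev0 */
            return 0;
    }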
2022 1845
2023static struct configfs_item_operations target_core_dev_item_ops = { 1846static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2107,7 +1930,6 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
2107{ 1930{
2108 struct se_device *dev; 1931 struct se_device *dev;
2109 struct se_hba *hba; 1932 struct se_hba *hba;
2110 struct se_subsystem_dev *su_dev;
2111 struct t10_alua_lu_gp_member *lu_gp_mem; 1933 struct t10_alua_lu_gp_member *lu_gp_mem;
2112 ssize_t len = 0, cur_len; 1934 ssize_t len = 0, cur_len;
2113 unsigned char buf[LU_GROUP_NAME_BUF]; 1935 unsigned char buf[LU_GROUP_NAME_BUF];
@@ -2117,12 +1939,11 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
2117 spin_lock(&lu_gp->lu_gp_lock); 1939 spin_lock(&lu_gp->lu_gp_lock);
2118 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 1940 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
2119 dev = lu_gp_mem->lu_gp_mem_dev; 1941 dev = lu_gp_mem->lu_gp_mem_dev;
2120 su_dev = dev->se_sub_dev; 1942 hba = dev->se_hba;
2121 hba = su_dev->se_dev_hba;
2122 1943
2123 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", 1944 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
2124 config_item_name(&hba->hba_group.cg_item), 1945 config_item_name(&hba->hba_group.cg_item),
2125 config_item_name(&su_dev->se_dev_group.cg_item)); 1946 config_item_name(&dev->dev_group.cg_item));
2126 cur_len++; /* Extra byte for NULL terminator */ 1947 cur_len++; /* Extra byte for NULL terminator */
2127 1948
2128 if ((cur_len + len) > PAGE_SIZE) { 1949 if ((cur_len + len) > PAGE_SIZE) {
@@ -2260,7 +2081,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2260 const char *page, 2081 const char *page,
2261 size_t count) 2082 size_t count)
2262{ 2083{
2263 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 2084 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
2264 unsigned long tmp; 2085 unsigned long tmp;
2265 int new_state, ret; 2086 int new_state, ret;
2266 2087
@@ -2284,7 +2105,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2284 return -EINVAL; 2105 return -EINVAL;
2285 } 2106 }
2286 2107
2287 ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr, 2108 ret = core_alua_do_port_transition(tg_pt_gp, dev,
2288 NULL, NULL, new_state, 0); 2109 NULL, NULL, new_state, 0);
2289 return (!ret) ? count : -EINVAL; 2110 return (!ret) ? count : -EINVAL;
2290} 2111}
@@ -2620,11 +2441,10 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
2620 struct t10_alua *alua = container_of(group, struct t10_alua, 2441 struct t10_alua *alua = container_of(group, struct t10_alua,
2621 alua_tg_pt_gps_group); 2442 alua_tg_pt_gps_group);
2622 struct t10_alua_tg_pt_gp *tg_pt_gp; 2443 struct t10_alua_tg_pt_gp *tg_pt_gp;
2623 struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
2624 struct config_group *alua_tg_pt_gp_cg = NULL; 2444 struct config_group *alua_tg_pt_gp_cg = NULL;
2625 struct config_item *alua_tg_pt_gp_ci = NULL; 2445 struct config_item *alua_tg_pt_gp_ci = NULL;
2626 2446
2627 tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); 2447 tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
2628 if (!tg_pt_gp) 2448 if (!tg_pt_gp)
2629 return NULL; 2449 return NULL;
2630 2450
@@ -2721,10 +2541,10 @@ static struct config_group *target_core_make_subdev(
2721 const char *name) 2541 const char *name)
2722{ 2542{
2723 struct t10_alua_tg_pt_gp *tg_pt_gp; 2543 struct t10_alua_tg_pt_gp *tg_pt_gp;
2724 struct se_subsystem_dev *se_dev;
2725 struct se_subsystem_api *t; 2544 struct se_subsystem_api *t;
2726 struct config_item *hba_ci = &group->cg_item; 2545 struct config_item *hba_ci = &group->cg_item;
2727 struct se_hba *hba = item_to_hba(hba_ci); 2546 struct se_hba *hba = item_to_hba(hba_ci);
2547 struct se_device *dev;
2728 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; 2548 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
2729 struct config_group *dev_stat_grp = NULL; 2549 struct config_group *dev_stat_grp = NULL;
2730 int errno = -ENOMEM, ret; 2550 int errno = -ENOMEM, ret;
@@ -2737,120 +2557,80 @@ static struct config_group *target_core_make_subdev(
2737 */ 2557 */
2738 t = hba->transport; 2558 t = hba->transport;
2739 2559
2740 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 2560 dev = target_alloc_device(hba, name);
2741 if (!se_dev) { 2561 if (!dev)
2742 pr_err("Unable to allocate memory for" 2562 goto out_unlock;
2743 " struct se_subsystem_dev\n"); 2563
2744 goto unlock; 2564 dev_cg = &dev->dev_group;
2745 }
2746 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
2747 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
2748 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
2749 INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
2750 spin_lock_init(&se_dev->t10_pr.registration_lock);
2751 spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
2752 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
2753 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
2754 spin_lock_init(&se_dev->se_dev_lock);
2755 se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
2756 se_dev->t10_wwn.t10_sub_dev = se_dev;
2757 se_dev->t10_alua.t10_sub_dev = se_dev;
2758 se_dev->se_dev_attrib.da_sub_dev = se_dev;
2759
2760 se_dev->se_dev_hba = hba;
2761 dev_cg = &se_dev->se_dev_group;
2762 2565
2763 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, 2566 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
2764 GFP_KERNEL); 2567 GFP_KERNEL);
2765 if (!dev_cg->default_groups) 2568 if (!dev_cg->default_groups)
2766 goto out; 2569 goto out_free_device;
2767 /*
2768 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
2769 * for ->allocate_virtdevice()
2770 *
2771 * se_dev->se_dev_ptr will be set after ->create_virtdev()
2772 * has been called successfully in the next level up in the
2773 * configfs tree for device object's struct config_group.
2774 */
2775 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
2776 if (!se_dev->se_dev_su_ptr) {
2777 pr_err("Unable to locate subsystem dependent pointer"
2778 " from allocate_virtdevice()\n");
2779 goto out;
2780 }
2781 2570
2782 config_group_init_type_name(&se_dev->se_dev_group, name, 2571 config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
2783 &target_core_dev_cit); 2572 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
2784 config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
2785 &target_core_dev_attrib_cit); 2573 &target_core_dev_attrib_cit);
2786 config_group_init_type_name(&se_dev->se_dev_pr_group, "pr", 2574 config_group_init_type_name(&dev->dev_pr_group, "pr",
2787 &target_core_dev_pr_cit); 2575 &target_core_dev_pr_cit);
2788 config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn", 2576 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
2789 &target_core_dev_wwn_cit); 2577 &target_core_dev_wwn_cit);
2790 config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, 2578 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
2791 "alua", &target_core_alua_tg_pt_gps_cit); 2579 "alua", &target_core_alua_tg_pt_gps_cit);
2792 config_group_init_type_name(&se_dev->dev_stat_grps.stat_group, 2580 config_group_init_type_name(&dev->dev_stat_grps.stat_group,
2793 "statistics", &target_core_stat_cit); 2581 "statistics", &target_core_stat_cit);
2794 2582
2795 dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; 2583 dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
2796 dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; 2584 dev_cg->default_groups[1] = &dev->dev_pr_group;
2797 dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; 2585 dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
2798 dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; 2586 dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
2799 dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group; 2587 dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
2800 dev_cg->default_groups[5] = NULL; 2588 dev_cg->default_groups[5] = NULL;
2801 /* 2589 /*
2802 * Add core/$HBA/$DEV/alua/default_tg_pt_gp 2590 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
2803 */ 2591 */
2804 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); 2592 tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
2805 if (!tg_pt_gp) 2593 if (!tg_pt_gp)
2806 goto out; 2594 goto out_free_dev_cg_default_groups;
2595 dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
2807 2596
2808 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2597 tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
2809 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 2598 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
2810 GFP_KERNEL); 2599 GFP_KERNEL);
2811 if (!tg_pt_gp_cg->default_groups) { 2600 if (!tg_pt_gp_cg->default_groups) {
2812 pr_err("Unable to allocate tg_pt_gp_cg->" 2601 pr_err("Unable to allocate tg_pt_gp_cg->"
2813 "default_groups\n"); 2602 "default_groups\n");
2814 goto out; 2603 goto out_free_tg_pt_gp;
2815 } 2604 }
2816 2605
2817 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, 2606 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
2818 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); 2607 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
2819 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; 2608 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
2820 tg_pt_gp_cg->default_groups[1] = NULL; 2609 tg_pt_gp_cg->default_groups[1] = NULL;
2821 se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
2822 /* 2610 /*
2823 * Add core/$HBA/$DEV/statistics/ default groups 2611 * Add core/$HBA/$DEV/statistics/ default groups
2824 */ 2612 */
2825 dev_stat_grp = &se_dev->dev_stat_grps.stat_group; 2613 dev_stat_grp = &dev->dev_stat_grps.stat_group;
2826 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, 2614 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
2827 GFP_KERNEL); 2615 GFP_KERNEL);
2828 if (!dev_stat_grp->default_groups) { 2616 if (!dev_stat_grp->default_groups) {
2829 pr_err("Unable to allocate dev_stat_grp->default_groups\n"); 2617 pr_err("Unable to allocate dev_stat_grp->default_groups\n");
2830 goto out; 2618 goto out_free_tg_pt_gp_cg_default_groups;
2831 } 2619 }
2832 target_stat_setup_dev_default_groups(se_dev); 2620 target_stat_setup_dev_default_groups(dev);
2833
2834 pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
2835 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
2836 2621
2837 mutex_unlock(&hba->hba_access_mutex); 2622 mutex_unlock(&hba->hba_access_mutex);
2838 return &se_dev->se_dev_group; 2623 return dev_cg;
2839out: 2624
2840 if (se_dev->t10_alua.default_tg_pt_gp) { 2625out_free_tg_pt_gp_cg_default_groups:
2841 core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp); 2626 kfree(tg_pt_gp_cg->default_groups);
2842 se_dev->t10_alua.default_tg_pt_gp = NULL; 2627out_free_tg_pt_gp:
2843 } 2628 core_alua_free_tg_pt_gp(tg_pt_gp);
2844 if (dev_stat_grp) 2629out_free_dev_cg_default_groups:
2845 kfree(dev_stat_grp->default_groups); 2630 kfree(dev_cg->default_groups);
2846 if (tg_pt_gp_cg) 2631out_free_device:
2847 kfree(tg_pt_gp_cg->default_groups); 2632 target_free_device(dev);
2848 if (dev_cg) 2633out_unlock:
2849 kfree(dev_cg->default_groups);
2850 if (se_dev->se_dev_su_ptr)
2851 t->free_device(se_dev->se_dev_su_ptr);
2852 kfree(se_dev);
2853unlock:
2854 mutex_unlock(&hba->hba_access_mutex); 2634 mutex_unlock(&hba->hba_access_mutex);
2855 return ERR_PTR(errno); 2635 return ERR_PTR(errno);
2856} 2636}
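The rewritten error path is the canonical kernel goto ladder: every allocation gets a label, and a failure jumps to the label that unwinds exactly what was acquired so far, in reverse order. A small user-space model of the shape:

    #include <stdio.h>
    #include <stdlib.h>

    /* Acquire a, then b, then c; on failure jump to the label that frees
     * exactly what is held, in reverse order of acquisition. */
    static int setup(void)
    {
            char *a, *b, *c;

            a = malloc(16);
            if (!a)
                    goto out;
            b = malloc(16);
            if (!b)
                    goto out_free_a;
            c = malloc(16);
            if (!c)
                    goto out_free_b;

            printf("all acquired\n");
            free(c);
            free(b);
            free(a);
            return 0;

    out_free_b:
            free(b);
    out_free_a:
            free(a);
    out:
            return -1;
    }

    int main(void)
    {
            return setup() ? 1 : 0;
    }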
@@ -2859,18 +2639,19 @@ static void target_core_drop_subdev(
2859 struct config_group *group, 2639 struct config_group *group,
2860 struct config_item *item) 2640 struct config_item *item)
2861{ 2641{
2862 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 2642 struct config_group *dev_cg = to_config_group(item);
2863 struct se_subsystem_dev, se_dev_group); 2643 struct se_device *dev =
2644 container_of(dev_cg, struct se_device, dev_group);
2864 struct se_hba *hba; 2645 struct se_hba *hba;
2865 struct config_item *df_item; 2646 struct config_item *df_item;
2866 struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp; 2647 struct config_group *tg_pt_gp_cg, *dev_stat_grp;
2867 int i; 2648 int i;
2868 2649
2869 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 2650 hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
2870 2651
2871 mutex_lock(&hba->hba_access_mutex); 2652 mutex_lock(&hba->hba_access_mutex);
2872 2653
2873 dev_stat_grp = &se_dev->dev_stat_grps.stat_group; 2654 dev_stat_grp = &dev->dev_stat_grps.stat_group;
2874 for (i = 0; dev_stat_grp->default_groups[i]; i++) { 2655 for (i = 0; dev_stat_grp->default_groups[i]; i++) {
2875 df_item = &dev_stat_grp->default_groups[i]->cg_item; 2656 df_item = &dev_stat_grp->default_groups[i]->cg_item;
2876 dev_stat_grp->default_groups[i] = NULL; 2657 dev_stat_grp->default_groups[i] = NULL;
@@ -2878,7 +2659,7 @@ static void target_core_drop_subdev(
2878 } 2659 }
2879 kfree(dev_stat_grp->default_groups); 2660 kfree(dev_stat_grp->default_groups);
2880 2661
2881 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2662 tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
2882 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { 2663 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
2883 df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; 2664 df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
2884 tg_pt_gp_cg->default_groups[i] = NULL; 2665 tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2889,17 +2670,15 @@ static void target_core_drop_subdev(
2889 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp 2670 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
2890 * directly from target_core_alua_tg_pt_gp_release(). 2671 * directly from target_core_alua_tg_pt_gp_release().
2891 */ 2672 */
2892 se_dev->t10_alua.default_tg_pt_gp = NULL; 2673 dev->t10_alua.default_tg_pt_gp = NULL;
2893 2674
2894 dev_cg = &se_dev->se_dev_group;
2895 for (i = 0; dev_cg->default_groups[i]; i++) { 2675 for (i = 0; dev_cg->default_groups[i]; i++) {
2896 df_item = &dev_cg->default_groups[i]->cg_item; 2676 df_item = &dev_cg->default_groups[i]->cg_item;
2897 dev_cg->default_groups[i] = NULL; 2677 dev_cg->default_groups[i] = NULL;
2898 config_item_put(df_item); 2678 config_item_put(df_item);
2899 } 2679 }
2900 /* 2680 /*
2901 * The releasing of se_dev and associated se_dev->se_dev_ptr is done 2681 * se_dev is released from target_core_dev_item_ops->release()
2902 * from target_core_dev_item_ops->release() ->target_core_dev_release().
2903 */ 2682 */
2904 config_item_put(item); 2683 config_item_put(item);
2905 mutex_unlock(&hba->hba_access_mutex); 2684 mutex_unlock(&hba->hba_access_mutex);
@@ -2962,13 +2741,10 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
2962 return -EINVAL; 2741 return -EINVAL;
2963 } 2742 }
2964 2743
2965 spin_lock(&hba->device_lock); 2744 if (hba->dev_count) {
2966 if (!list_empty(&hba->hba_dev_list)) {
2967 pr_err("Unable to set hba_mode with active devices\n"); 2745 pr_err("Unable to set hba_mode with active devices\n");
2968 spin_unlock(&hba->device_lock);
2969 return -EINVAL; 2746 return -EINVAL;
2970 } 2747 }
2971 spin_unlock(&hba->device_lock);
2972 2748
2973 ret = transport->pmode_enable_hba(hba, mode_flag); 2749 ret = transport->pmode_enable_hba(hba, mode_flag);
2974 if (ret < 0) 2750 if (ret < 0)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 9abef9f8eb76..b27c75a0e2e2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -50,11 +50,7 @@
50#include "target_core_pr.h" 50#include "target_core_pr.h"
51#include "target_core_ua.h" 51#include "target_core_ua.h"
52 52
53static void se_dev_start(struct se_device *dev);
54static void se_dev_stop(struct se_device *dev);
55
56static struct se_hba *lun0_hba; 53static struct se_hba *lun0_hba;
57static struct se_subsystem_dev *lun0_su_dev;
58/* not static, needed by tpg.c */ 54/* not static, needed by tpg.c */
59struct se_device *g_lun0_dev; 55struct se_device *g_lun0_dev;
60 56
@@ -136,15 +132,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
136 se_cmd->orig_fe_lun = 0; 132 se_cmd->orig_fe_lun = 0;
137 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 133 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
138 } 134 }
139 /*
140 * Determine if the struct se_lun is online.
141 * FIXME: Check for LUN_RESET + UNIT Attention
142 */
143 if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
144 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
145 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
146 return -ENODEV;
147 }
148 135
149 /* Directly associate cmd with se_dev */ 136 /* Directly associate cmd with se_dev */
150 se_cmd->se_dev = se_lun->lun_se_dev; 137 se_cmd->se_dev = se_lun->lun_se_dev;
@@ -202,14 +189,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
202 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 189 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
203 return -ENODEV; 190 return -ENODEV;
204 } 191 }
205 /*
206 * Determine if the struct se_lun is online.
207 * FIXME: Check for LUN_RESET + UNIT Attention
208 */
209 if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
210 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
211 return -ENODEV;
212 }
213 192
214 /* Directly associate cmd with se_dev */ 193 /* Directly associate cmd with se_dev */
215 se_cmd->se_dev = se_lun->lun_se_dev; 194 se_cmd->se_dev = se_lun->lun_se_dev;
@@ -565,7 +544,6 @@ static void core_export_port(
565 struct se_port *port, 544 struct se_port *port,
566 struct se_lun *lun) 545 struct se_lun *lun)
567{ 546{
568 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
569 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; 547 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
570 548
571 spin_lock(&dev->se_port_lock); 549 spin_lock(&dev->se_port_lock);
@@ -578,7 +556,7 @@ static void core_export_port(
578 list_add_tail(&port->sep_list, &dev->dev_sep_list); 556 list_add_tail(&port->sep_list, &dev->dev_sep_list);
579 spin_unlock(&dev->se_port_lock); 557 spin_unlock(&dev->se_port_lock);
580 558
581 if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 559 if (dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
582 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 560 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
583 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { 561 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
584 pr_err("Unable to allocate t10_alua_tg_pt" 562 pr_err("Unable to allocate t10_alua_tg_pt"
@@ -587,7 +565,7 @@ static void core_export_port(
587 } 565 }
588 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 566 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
589 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 567 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
590 su_dev->t10_alua.default_tg_pt_gp); 568 dev->t10_alua.default_tg_pt_gp);
591 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 569 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
592 pr_debug("%s/%s: Adding to default ALUA Target Port" 570 pr_debug("%s/%s: Adding to default ALUA Target Port"
593 " Group: alua/default_tg_pt_gp\n", 571 " Group: alua/default_tg_pt_gp\n",
@@ -625,6 +603,7 @@ int core_dev_export(
625 struct se_portal_group *tpg, 603 struct se_portal_group *tpg,
626 struct se_lun *lun) 604 struct se_lun *lun)
627{ 605{
606 struct se_hba *hba = dev->se_hba;
628 struct se_port *port; 607 struct se_port *port;
629 608
630 port = core_alloc_port(dev); 609 port = core_alloc_port(dev);
@@ -632,9 +611,11 @@ int core_dev_export(
632 return PTR_ERR(port); 611 return PTR_ERR(port);
633 612
634 lun->lun_se_dev = dev; 613 lun->lun_se_dev = dev;
635 se_dev_start(dev);
636 614
637 atomic_inc(&dev->dev_export_obj.obj_access_count); 615 spin_lock(&hba->device_lock);
616 dev->export_count++;
617 spin_unlock(&hba->device_lock);
618
638 core_export_port(dev, tpg, port, lun); 619 core_export_port(dev, tpg, port, lun);
639 return 0; 620 return 0;
640} 621}
@@ -644,6 +625,7 @@ void core_dev_unexport(
644 struct se_portal_group *tpg, 625 struct se_portal_group *tpg,
645 struct se_lun *lun) 626 struct se_lun *lun)
646{ 627{
628 struct se_hba *hba = dev->se_hba;
647 struct se_port *port = lun->lun_sep; 629 struct se_port *port = lun->lun_sep;
648 630
649 spin_lock(&lun->lun_sep_lock); 631 spin_lock(&lun->lun_sep_lock);
@@ -654,11 +636,13 @@ void core_dev_unexport(
654 spin_unlock(&lun->lun_sep_lock); 636 spin_unlock(&lun->lun_sep_lock);
655 637
656 spin_lock(&dev->se_port_lock); 638 spin_lock(&dev->se_port_lock);
657 atomic_dec(&dev->dev_export_obj.obj_access_count);
658 core_release_port(dev, port); 639 core_release_port(dev, port);
659 spin_unlock(&dev->se_port_lock); 640 spin_unlock(&dev->se_port_lock);
660 641
661 se_dev_stop(dev); 642 spin_lock(&hba->device_lock);
643 dev->export_count--;
644 spin_unlock(&hba->device_lock);
645
662 lun->lun_se_dev = NULL; 646 lun->lun_se_dev = NULL;
663} 647}
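Exporting now bumps a plain export_count under hba->device_lock instead of the old dev_export_obj atomic; the se_dev_set_* helpers below refuse attribute changes while the count is non-zero. A user-space sketch with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    struct hba { pthread_mutex_t device_lock; };
    struct dev { struct hba *hba; int export_count; };

    static void dev_export(struct dev *d)
    {
            pthread_mutex_lock(&d->hba->device_lock);
            d->export_count++;
            pthread_mutex_unlock(&d->hba->device_lock);
    }

    static void dev_unexport(struct dev *d)
    {
            pthread_mutex_lock(&d->hba->device_lock);
            d->export_count--;
            pthread_mutex_unlock(&d->hba->device_lock);
    }

    /* A non-zero count means initiators can see the device, so refuse
     * reconfiguration -- as the se_dev_set_* helpers below do. */
    static int set_block_size(struct dev *d, unsigned int bs)
    {
            (void)bs;
            if (d->export_count) {
                    fprintf(stderr, "device exported, refusing change\n");
                    return -1;
            }
            return 0;       /* apply the new block size here */
    }

    int main(void)
    {
            struct hba h = { PTHREAD_MUTEX_INITIALIZER };
            struct dev d = { &h, 0 };

            dev_export(&d);
            set_block_size(&d, 4096);       /* refused */
            dev_unexport(&d);
            return set_block_size(&d, 4096) ? 1 : 0;
    }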
664 648
@@ -725,127 +709,17 @@ done:
725 return 0; 709 return 0;
726} 710}
727 711
728/* se_release_device_for_hba(): 712static void se_release_vpd_for_dev(struct se_device *dev)
729 *
730 *
731 */
732void se_release_device_for_hba(struct se_device *dev)
733{
734 struct se_hba *hba = dev->se_hba;
735
736 if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
737 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
738 (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
739 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
740 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
741 se_dev_stop(dev);
742
743 if (dev->dev_ptr) {
744 destroy_workqueue(dev->tmr_wq);
745 if (dev->transport->free_device)
746 dev->transport->free_device(dev->dev_ptr);
747 }
748
749 spin_lock(&hba->device_lock);
750 list_del(&dev->dev_list);
751 hba->dev_count--;
752 spin_unlock(&hba->device_lock);
753
754 core_scsi3_free_all_registrations(dev);
755 se_release_vpd_for_dev(dev);
756
757 kfree(dev);
758}
759
760void se_release_vpd_for_dev(struct se_device *dev)
761{ 713{
762 struct t10_vpd *vpd, *vpd_tmp; 714 struct t10_vpd *vpd, *vpd_tmp;
763 715
764 spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); 716 spin_lock(&dev->t10_wwn.t10_vpd_lock);
765 list_for_each_entry_safe(vpd, vpd_tmp, 717 list_for_each_entry_safe(vpd, vpd_tmp,
766 &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) { 718 &dev->t10_wwn.t10_vpd_list, vpd_list) {
767 list_del(&vpd->vpd_list); 719 list_del(&vpd->vpd_list);
768 kfree(vpd); 720 kfree(vpd);
769 } 721 }
770 spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); 722 spin_unlock(&dev->t10_wwn.t10_vpd_lock);
771}
772
773/* se_free_virtual_device():
774 *
775 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
776 */
777int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
778{
779 if (!list_empty(&dev->dev_sep_list))
780 dump_stack();
781
782 core_alua_free_lu_gp_mem(dev);
783 se_release_device_for_hba(dev);
784
785 return 0;
786}
787
788static void se_dev_start(struct se_device *dev)
789{
790 struct se_hba *hba = dev->se_hba;
791
792 spin_lock(&hba->device_lock);
793 atomic_inc(&dev->dev_obj.obj_access_count);
794 if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
795 if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
796 dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
797 dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
798 } else if (dev->dev_status &
799 TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
800 dev->dev_status &=
801 ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
802 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
803 }
804 }
805 spin_unlock(&hba->device_lock);
806}
807
808static void se_dev_stop(struct se_device *dev)
809{
810 struct se_hba *hba = dev->se_hba;
811
812 spin_lock(&hba->device_lock);
813 atomic_dec(&dev->dev_obj.obj_access_count);
814 if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
815 if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
816 dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
817 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
818 } else if (dev->dev_status &
819 TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
820 dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
821 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
822 }
823 }
824 spin_unlock(&hba->device_lock);
825}
826
827int se_dev_check_online(struct se_device *dev)
828{
829 unsigned long flags;
830 int ret;
831
832 spin_lock_irqsave(&dev->dev_status_lock, flags);
833 ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
834 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
835 spin_unlock_irqrestore(&dev->dev_status_lock, flags);
836
837 return ret;
838}
839
840int se_dev_check_shutdown(struct se_device *dev)
841{
842 int ret;
843
844 spin_lock_irq(&dev->dev_status_lock);
845 ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
846 spin_unlock_irq(&dev->dev_status_lock);
847
848 return ret;
849} 723}
850 724
851static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) 725static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
@@ -866,72 +740,13 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
866 return aligned_max_sectors; 740 return aligned_max_sectors;
867} 741}
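se_dev_align_max_sectors() itself is untouched by this hunk; its job (body not shown here) is to round a sector limit down so every transfer is a whole number of pages. A sketch of that arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096u         /* assumed page size for the example */

    /* Round max_sectors down so max_sectors * block_size is a whole
     * number of pages. */
    static unsigned int align_max_sectors(unsigned int max_sectors,
                                          unsigned int block_size)
    {
            unsigned int bytes = max_sectors * block_size;

            bytes -= bytes % PAGE_SIZE;     /* rounddown(bytes, PAGE_SIZE) */
            return bytes / block_size;
    }

    int main(void)
    {
            /* 1023 x 512-byte sectors = 523776 bytes -> 127 pages -> 1016 */
            printf("%u\n", align_max_sectors(1023, 512));
            return 0;
    }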
868 742
869void se_dev_set_default_attribs(
870 struct se_device *dev,
871 struct se_dev_limits *dev_limits)
872{
873 struct queue_limits *limits = &dev_limits->limits;
874
875 dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
876 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
877 dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
878 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
879 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
880 dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
881 dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
882 dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
883 dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
884 dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
885 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
886 dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
887 dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
888 /*
889 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
890 * iblock_create_virtdevice() from struct queue_limits values
891 * if blk_queue_discard()==1
892 */
893 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
894 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
895 DA_MAX_UNMAP_BLOCK_DESC_COUNT;
896 dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
897 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
898 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
899 /*
900 * block_size is based on subsystem plugin dependent requirements.
901 */
902 dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
903 dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
904 /*
905 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
906 */
907 limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
908 limits->logical_block_size);
909 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
910
911 /*
912 * Set fabric_max_sectors, which is reported in block limits
913 * VPD page (B0h).
914 */
915 dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
916 /*
917 * Set optimal_sectors from fabric_max_sectors, which can be
918 * lowered via configfs.
919 */
920 dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
921 /*
922 * queue_depth is based on subsystem plugin dependent requirements.
923 */
924 dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
925 dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
926}
927
928int se_dev_set_max_unmap_lba_count( 743int se_dev_set_max_unmap_lba_count(
929 struct se_device *dev, 744 struct se_device *dev,
930 u32 max_unmap_lba_count) 745 u32 max_unmap_lba_count)
931{ 746{
932 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; 747 dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
933 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n", 748 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
934 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); 749 dev, dev->dev_attrib.max_unmap_lba_count);
935 return 0; 750 return 0;
936} 751}
937 752
@@ -939,10 +754,10 @@ int se_dev_set_max_unmap_block_desc_count(
939 struct se_device *dev, 754 struct se_device *dev,
940 u32 max_unmap_block_desc_count) 755 u32 max_unmap_block_desc_count)
941{ 756{
942 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 757 dev->dev_attrib.max_unmap_block_desc_count =
943 max_unmap_block_desc_count; 758 max_unmap_block_desc_count;
944 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n", 759 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
945 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); 760 dev, dev->dev_attrib.max_unmap_block_desc_count);
946 return 0; 761 return 0;
947} 762}
948 763
@@ -950,9 +765,9 @@ int se_dev_set_unmap_granularity(
950 struct se_device *dev, 765 struct se_device *dev,
951 u32 unmap_granularity) 766 u32 unmap_granularity)
952{ 767{
953 dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; 768 dev->dev_attrib.unmap_granularity = unmap_granularity;
954 pr_debug("dev[%p]: Set unmap_granularity: %u\n", 769 pr_debug("dev[%p]: Set unmap_granularity: %u\n",
955 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); 770 dev, dev->dev_attrib.unmap_granularity);
956 return 0; 771 return 0;
957} 772}
958 773
@@ -960,9 +775,9 @@ int se_dev_set_unmap_granularity_alignment(
960 struct se_device *dev, 775 struct se_device *dev,
961 u32 unmap_granularity_alignment) 776 u32 unmap_granularity_alignment)
962{ 777{
963 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; 778 dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
964 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n", 779 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
965 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); 780 dev, dev->dev_attrib.unmap_granularity_alignment);
966 return 0; 781 return 0;
967} 782}
968 783
@@ -993,9 +808,9 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
993 pr_err("emulate_fua_write not supported for pSCSI\n"); 808 pr_err("emulate_fua_write not supported for pSCSI\n");
994 return -EINVAL; 809 return -EINVAL;
995 } 810 }
996 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; 811 dev->dev_attrib.emulate_fua_write = flag;
997 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 812 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
998 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); 813 dev, dev->dev_attrib.emulate_fua_write);
999 return 0; 814 return 0;
1000} 815}
1001 816
@@ -1025,9 +840,9 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1025 pr_err("emulate_write_cache not supported for pSCSI\n"); 840 pr_err("emulate_write_cache not supported for pSCSI\n");
1026 return -EINVAL; 841 return -EINVAL;
1027 } 842 }
1028 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; 843 dev->dev_attrib.emulate_write_cache = flag;
1029 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 844 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1030 dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); 845 dev, dev->dev_attrib.emulate_write_cache);
1031 return 0; 846 return 0;
1032} 847}
1033 848
@@ -1038,16 +853,15 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1038 return -EINVAL; 853 return -EINVAL;
1039 } 854 }
1040 855
1041 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 856 if (dev->export_count) {
1042 pr_err("dev[%p]: Unable to change SE Device" 857 pr_err("dev[%p]: Unable to change SE Device"
1043 " UA_INTRLCK_CTRL while dev_export_obj: %d count" 858 " UA_INTRLCK_CTRL while export_count is %d\n",
1044 " exists\n", dev, 859 dev, dev->export_count);
1045 atomic_read(&dev->dev_export_obj.obj_access_count));
1046 return -EINVAL; 860 return -EINVAL;
1047 } 861 }
1048 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; 862 dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
1049 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", 863 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1050 dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); 864 dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
1051 865
1052 return 0; 866 return 0;
1053} 867}
@@ -1059,15 +873,15 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1059 return -EINVAL; 873 return -EINVAL;
1060 } 874 }
1061 875
1062 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 876 if (dev->export_count) {
1063 pr_err("dev[%p]: Unable to change SE Device TAS while" 877 pr_err("dev[%p]: Unable to change SE Device TAS while"
1064 " dev_export_obj: %d count exists\n", dev, 878 " export_count is %d\n",
1065 atomic_read(&dev->dev_export_obj.obj_access_count)); 879 dev, dev->export_count);
1066 return -EINVAL; 880 return -EINVAL;
1067 } 881 }
1068 dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; 882 dev->dev_attrib.emulate_tas = flag;
1069 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", 883 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1070 dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); 884 dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
1071 885
1072 return 0; 886 return 0;
1073} 887}
@@ -1082,12 +896,12 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1082 * We expect this value to be non-zero when generic Block Layer 896 * We expect this value to be non-zero when generic Block Layer
1083 * Discard supported is detected iblock_create_virtdevice(). 897 * Discard supported is detected iblock_create_virtdevice().
1084 */ 898 */
1085 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 899 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
1086 pr_err("Generic Block Discard not supported\n"); 900 pr_err("Generic Block Discard not supported\n");
1087 return -ENOSYS; 901 return -ENOSYS;
1088 } 902 }
1089 903
1090 dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; 904 dev->dev_attrib.emulate_tpu = flag;
1091 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 905 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1092 dev, flag); 906 dev, flag);
1093 return 0; 907 return 0;
@@ -1103,12 +917,12 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1103 * We expect this value to be non-zero when generic Block Layer 917 * We expect this value to be non-zero when generic Block Layer
1104 * Discard supported is detected iblock_create_virtdevice(). 918 * Discard supported is detected iblock_create_virtdevice().
1105 */ 919 */
1106 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 920 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
1107 pr_err("Generic Block Discard not supported\n"); 921 pr_err("Generic Block Discard not supported\n");
1108 return -ENOSYS; 922 return -ENOSYS;
1109 } 923 }
1110 924
1111 dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; 925 dev->dev_attrib.emulate_tpws = flag;
1112 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 926 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1113 dev, flag); 927 dev, flag);
1114 return 0; 928 return 0;
@@ -1120,9 +934,9 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1120 pr_err("Illegal value %d\n", flag); 934 pr_err("Illegal value %d\n", flag);
1121 return -EINVAL; 935 return -EINVAL;
1122 } 936 }
1123 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; 937 dev->dev_attrib.enforce_pr_isids = flag;
1124 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 938 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1125 (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 939 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1126 return 0; 940 return 0;
1127} 941}
1128 942
@@ -1132,7 +946,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1132 printk(KERN_ERR "Illegal value %d\n", flag); 946 printk(KERN_ERR "Illegal value %d\n", flag);
1133 return -EINVAL; 947 return -EINVAL;
1134 } 948 }
1135 dev->se_sub_dev->se_dev_attrib.is_nonrot = flag; 949 dev->dev_attrib.is_nonrot = flag;
1136 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", 950 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
1137 dev, flag); 951 dev, flag);
1138 return 0; 952 return 0;
@@ -1145,7 +959,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1145 " reordering not implemented\n", dev); 959 " reordering not implemented\n", dev);
1146 return -ENOSYS; 960 return -ENOSYS;
1147 } 961 }
1148 dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag; 962 dev->dev_attrib.emulate_rest_reord = flag;
1149 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); 963 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1150 return 0; 964 return 0;
1151} 965}
@@ -1155,10 +969,10 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1155 */ 969 */
1156int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) 970int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1157{ 971{
1158 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 972 if (dev->export_count) {
1159 pr_err("dev[%p]: Unable to change SE Device TCQ while" 973 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1160 " dev_export_obj: %d count exists\n", dev, 974 " export_count is %d\n",
1161 atomic_read(&dev->dev_export_obj.obj_access_count)); 975 dev, dev->export_count);
1162 return -EINVAL; 976 return -EINVAL;
1163 } 977 }
1164 if (!queue_depth) { 978 if (!queue_depth) {
@@ -1168,26 +982,26 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1168 } 982 }
1169 983
1170 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 984 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1171 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 985 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1172 pr_err("dev[%p]: Passed queue_depth: %u" 986 pr_err("dev[%p]: Passed queue_depth: %u"
1173 " exceeds TCM/SE_Device TCQ: %u\n", 987 " exceeds TCM/SE_Device TCQ: %u\n",
1174 dev, queue_depth, 988 dev, queue_depth,
1175 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 989 dev->dev_attrib.hw_queue_depth);
1176 return -EINVAL; 990 return -EINVAL;
1177 } 991 }
1178 } else { 992 } else {
1179 if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { 993 if (queue_depth > dev->dev_attrib.queue_depth) {
1180 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 994 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1181 pr_err("dev[%p]: Passed queue_depth:" 995 pr_err("dev[%p]: Passed queue_depth:"
1182 " %u exceeds TCM/SE_Device MAX" 996 " %u exceeds TCM/SE_Device MAX"
1183 " TCQ: %u\n", dev, queue_depth, 997 " TCQ: %u\n", dev, queue_depth,
1184 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 998 dev->dev_attrib.hw_queue_depth);
1185 return -EINVAL; 999 return -EINVAL;
1186 } 1000 }
1187 } 1001 }
1188 } 1002 }
1189 1003
1190 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1004 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1191 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1005 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1192 dev, queue_depth); 1006 dev, queue_depth);
1193 return 0; 1007 return 0;
@@ -1195,10 +1009,10 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1195 1009
1196int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 1010int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1197{ 1011{
1198 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1012 if (dev->export_count) {
1199 pr_err("dev[%p]: Unable to change SE Device" 1013 pr_err("dev[%p]: Unable to change SE Device"
1200 " fabric_max_sectors while dev_export_obj: %d count exists\n", 1014 " fabric_max_sectors while export_count is %d\n",
1201 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1015 dev, dev->export_count);
1202 return -EINVAL; 1016 return -EINVAL;
1203 } 1017 }
1204 if (!fabric_max_sectors) { 1018 if (!fabric_max_sectors) {
@@ -1213,11 +1027,11 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1213 return -EINVAL; 1027 return -EINVAL;
1214 } 1028 }
1215 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1029 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1216 if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { 1030 if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
1217 pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1031 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1218 " greater than TCM/SE_Device max_sectors:" 1032 " greater than TCM/SE_Device max_sectors:"
1219 " %u\n", dev, fabric_max_sectors, 1033 " %u\n", dev, fabric_max_sectors,
1220 dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1034 dev->dev_attrib.hw_max_sectors);
1221 return -EINVAL; 1035 return -EINVAL;
1222 } 1036 }
1223 } else { 1037 } else {
@@ -1233,9 +1047,9 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1233 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1047 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1234 */ 1048 */
1235 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, 1049 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
1236 dev->se_sub_dev->se_dev_attrib.block_size); 1050 dev->dev_attrib.block_size);
1237 1051
1238 dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors; 1052 dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
1239 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1053 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1240 dev, fabric_max_sectors); 1054 dev, fabric_max_sectors);
1241 return 0; 1055 return 0;
@@ -1243,10 +1057,10 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1243 1057
1244int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1058int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1245{ 1059{
1246 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1060 if (dev->export_count) {
1247 pr_err("dev[%p]: Unable to change SE Device" 1061 pr_err("dev[%p]: Unable to change SE Device"
1248 " optimal_sectors while dev_export_obj: %d count exists\n", 1062 " optimal_sectors while export_count is %d\n",
1249 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1063 dev, dev->export_count);
1250 return -EINVAL; 1064 return -EINVAL;
1251 } 1065 }
1252 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1066 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
@@ -1254,14 +1068,14 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1254 " changed for TCM/pSCSI\n", dev); 1068 " changed for TCM/pSCSI\n", dev);
1255 return -EINVAL; 1069 return -EINVAL;
1256 } 1070 }
1257 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) { 1071 if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
1258 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1072 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1259 " greater than fabric_max_sectors: %u\n", dev, 1073 " greater than fabric_max_sectors: %u\n", dev,
1260 optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors); 1074 optimal_sectors, dev->dev_attrib.fabric_max_sectors);
1261 return -EINVAL; 1075 return -EINVAL;
1262 } 1076 }
1263 1077
1264 dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; 1078 dev->dev_attrib.optimal_sectors = optimal_sectors;
1265 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", 1079 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1266 dev, optimal_sectors); 1080 dev, optimal_sectors);
1267 return 0; 1081 return 0;
@@ -1269,10 +1083,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1269 1083
1270int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1084int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1271{ 1085{
1272 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1086 if (dev->export_count) {
1273 pr_err("dev[%p]: Unable to change SE Device block_size" 1087 pr_err("dev[%p]: Unable to change SE Device block_size"
1274 " while dev_export_obj: %d count exists\n", dev, 1088 " while export_count is %d\n",
1275 atomic_read(&dev->dev_export_obj.obj_access_count)); 1089 dev, dev->export_count);
1276 return -EINVAL; 1090 return -EINVAL;
1277 } 1091 }
1278 1092
@@ -1293,7 +1107,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1293 return -EINVAL; 1107 return -EINVAL;
1294 } 1108 }
1295 1109
1296 dev->se_sub_dev->se_dev_attrib.block_size = block_size; 1110 dev->dev_attrib.block_size = block_size;
1297 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1111 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1298 dev, block_size); 1112 dev, block_size);
1299 return 0; 1113 return 0;
@@ -1307,12 +1121,6 @@ struct se_lun *core_dev_add_lun(
1307 struct se_lun *lun_p; 1121 struct se_lun *lun_p;
1308 int rc; 1122 int rc;
1309 1123
1310 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1311 pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
1312 atomic_read(&dev->dev_access_obj.obj_access_count));
1313 return ERR_PTR(-EACCES);
1314 }
1315
1316 lun_p = core_tpg_pre_addlun(tpg, lun); 1124 lun_p = core_tpg_pre_addlun(tpg, lun);
1317 if (IS_ERR(lun_p)) 1125 if (IS_ERR(lun_p))
1318 return lun_p; 1126 return lun_p;
@@ -1568,12 +1376,220 @@ void core_dev_free_initiator_node_lun_acl(
1568 kfree(lacl); 1376 kfree(lacl);
1569} 1377}
1570 1378
1379static void scsi_dump_inquiry(struct se_device *dev)
1380{
1381 struct t10_wwn *wwn = &dev->t10_wwn;
1382 char buf[17];
1383 int i, device_type;
1384 /*
1385 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1386 */
1387 for (i = 0; i < 8; i++)
1388 if (wwn->vendor[i] >= 0x20)
1389 buf[i] = wwn->vendor[i];
1390 else
1391 buf[i] = ' ';
1392 buf[i] = '\0';
1393 pr_debug(" Vendor: %s\n", buf);
1394
1395 for (i = 0; i < 16; i++)
1396 if (wwn->model[i] >= 0x20)
1397 buf[i] = wwn->model[i];
1398 else
1399 buf[i] = ' ';
1400 buf[i] = '\0';
1401 pr_debug(" Model: %s\n", buf);
1402
1403 for (i = 0; i < 4; i++)
1404 if (wwn->revision[i] >= 0x20)
1405 buf[i] = wwn->revision[i];
1406 else
1407 buf[i] = ' ';
1408 buf[i] = '\0';
1409 pr_debug(" Revision: %s\n", buf);
1410
1411 device_type = dev->transport->get_device_type(dev);
1412 pr_debug(" Type: %s ", scsi_device_type(device_type));
1413 pr_debug(" ANSI SCSI revision: %02x\n",
1414 dev->transport->get_device_rev(dev));
1415}
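scsi_dump_inquiry() prints the fixed-width INQUIRY strings by substituting a space for any byte below 0x20 before NUL-terminating, since the fields are space-padded and not guaranteed printable. The same sanitize-and-terminate loop in miniature (using unsigned bytes for simplicity):

    #include <stdio.h>

    /* Copy a fixed-width INQUIRY field, replacing bytes below 0x20
     * (controls, unset padding) with spaces, then NUL-terminate. */
    static void copy_inquiry_field(char *dst, const unsigned char *src, int len)
    {
            int i;

            for (i = 0; i < len; i++)
                    dst[i] = (src[i] >= 0x20) ? src[i] : ' ';
            dst[len] = '\0';
    }

    int main(void)
    {
            unsigned char vendor[8] = { 'L', 'I', 'O', '-', 'O', 'R', 'G', 0 };
            char buf[9];

            copy_inquiry_field(buf, vendor, 8);
            printf("Vendor: '%s'\n", buf);  /* Vendor: 'LIO-ORG ' */
            return 0;
    }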
1416
1417struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1418{
1419 struct se_device *dev;
1420
1421 dev = hba->transport->alloc_device(hba, name);
1422 if (!dev)
1423 return NULL;
1424
1425 dev->se_hba = hba;
1426 dev->transport = hba->transport;
1427
1428 INIT_LIST_HEAD(&dev->dev_list);
1429 INIT_LIST_HEAD(&dev->dev_sep_list);
1430 INIT_LIST_HEAD(&dev->dev_tmr_list);
1431 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1432 INIT_LIST_HEAD(&dev->state_list);
1433 INIT_LIST_HEAD(&dev->qf_cmd_list);
1434 spin_lock_init(&dev->stats_lock);
1435 spin_lock_init(&dev->execute_task_lock);
1436 spin_lock_init(&dev->delayed_cmd_lock);
1437 spin_lock_init(&dev->dev_reservation_lock);
1438 spin_lock_init(&dev->se_port_lock);
1439 spin_lock_init(&dev->se_tmr_lock);
1440 spin_lock_init(&dev->qf_cmd_lock);
1441 atomic_set(&dev->dev_ordered_id, 0);
1442 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
1443 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
1444 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
1445 INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
1446 spin_lock_init(&dev->t10_pr.registration_lock);
1447 spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
1448 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
1449 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
1450
1451 dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
1452 dev->t10_wwn.t10_dev = dev;
1453 dev->t10_alua.t10_dev = dev;
1454
1455 dev->dev_attrib.da_dev = dev;
1456 dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
1457 dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
1458 dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
1459 dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
1460 dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
1461 dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
1462 dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
1463 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
1464 dev->dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
1465 dev->dev_attrib.emulate_alua = DA_EMULATE_ALUA;
1466 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
1467 dev->dev_attrib.is_nonrot = DA_IS_NONROT;
1468 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
1469 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
1470 dev->dev_attrib.max_unmap_block_desc_count =
1471 DA_MAX_UNMAP_BLOCK_DESC_COUNT;
1472 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
1473 dev->dev_attrib.unmap_granularity_alignment =
1474 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
1475 dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
1476 dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
1477
1478 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
1479 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1480 else
1481 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1482
1483 return dev;
1484}
1485
1486int target_configure_device(struct se_device *dev)
1487{
1488 struct se_hba *hba = dev->se_hba;
1489 int ret;
1490
1491 if (dev->dev_flags & DF_CONFIGURED) {
1492 pr_err("se_dev->se_dev_ptr already set for storage"
1493 " object\n");
1494 return -EEXIST;
1495 }
1496
1497 ret = dev->transport->configure_device(dev);
1498 if (ret)
1499 goto out;
1500 dev->dev_flags |= DF_CONFIGURED;
1501
1502 /*
 1503 * XXX: there is not much point in having two different values here..
1504 */
1505 dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
1506 dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
1507
1508 /*
1509 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
1510 */
1511 dev->dev_attrib.hw_max_sectors =
1512 se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
1513 dev->dev_attrib.hw_block_size);
1514
1515 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1516 dev->creation_time = get_jiffies_64();
1517
1518 core_setup_reservations(dev);
1519
1520 ret = core_setup_alua(dev);
1521 if (ret)
1522 goto out;
1523
1524 /*
1525 * Startup the struct se_device processing thread
1526 */
1527 dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
1528 dev->transport->name);
1529 if (!dev->tmr_wq) {
1530 pr_err("Unable to create tmr workqueue for %s\n",
1531 dev->transport->name);
1532 ret = -ENOMEM;
1533 goto out_free_alua;
1534 }
1535
1536 /*
1537 * Setup work_queue for QUEUE_FULL
1538 */
1539 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1540
1541 /*
1542 * Preload the initial INQUIRY const values if we are doing
1543 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1544 * passthrough because this is being provided by the backend LLD.
1545 */
1546 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1547 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1548 strncpy(&dev->t10_wwn.model[0],
1549 dev->transport->inquiry_prod, 16);
1550 strncpy(&dev->t10_wwn.revision[0],
1551 dev->transport->inquiry_rev, 4);
1552 }
1553
1554 scsi_dump_inquiry(dev);
1555
1556 spin_lock(&hba->device_lock);
1557 hba->dev_count++;
1558 spin_unlock(&hba->device_lock);
1559 return 0;
1560
1561out_free_alua:
1562 core_alua_free_lu_gp_mem(dev);
1563out:
1564 se_release_vpd_for_dev(dev);
1565 return ret;
1566}
1567
1568void target_free_device(struct se_device *dev)
1569{
1570 struct se_hba *hba = dev->se_hba;
1571
1572 WARN_ON(!list_empty(&dev->dev_sep_list));
1573
1574 if (dev->dev_flags & DF_CONFIGURED) {
1575 destroy_workqueue(dev->tmr_wq);
1576
1577 spin_lock(&hba->device_lock);
1578 hba->dev_count--;
1579 spin_unlock(&hba->device_lock);
1580 }
1581
1582 core_alua_free_lu_gp_mem(dev);
1583 core_scsi3_free_all_registrations(dev);
1584 se_release_vpd_for_dev(dev);
1585
1586 dev->transport->free_device(dev);
1587}
1588
1571int core_dev_setup_virtual_lun0(void) 1589int core_dev_setup_virtual_lun0(void)
1572{ 1590{
1573 struct se_hba *hba; 1591 struct se_hba *hba;
1574 struct se_device *dev; 1592 struct se_device *dev;
1575 struct se_subsystem_dev *se_dev = NULL;
1576 struct se_subsystem_api *t;
1577 char buf[16]; 1593 char buf[16];
1578 int ret; 1594 int ret;
1579 1595
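
The target_alloc_device() added above hands the actual allocation to the backend, which embeds struct se_device inside its own per-device structure and returns the embedded member, the same scheme filesystems use for inode allocation. A hedged sketch of what a backend supplies, with hypothetical ex_ names (the real FILEIO and IBLOCK versions appear later in this patch):

	struct ex_dev {
		struct se_device dev;		/* embedded, never a pointer */
		char ex_path[256];		/* backend-private state */
	};

	static inline struct ex_dev *EX_DEV(struct se_device *dev)
	{
		/* recover the wrapper from the embedded member */
		return container_of(dev, struct ex_dev, dev);
	}

	static struct se_device *ex_alloc_device(struct se_hba *hba,
						 const char *name)
	{
		struct ex_dev *ex = kzalloc(sizeof(*ex), GFP_KERNEL);

		if (!ex)
			return NULL;
		return &ex->dev;	/* the core only ever sees this member */
	}

One kzalloc() now covers core and backend state together, and no se_dev_su_ptr-style back pointer is needed in either direction.
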
@@ -1581,60 +1597,28 @@ int core_dev_setup_virtual_lun0(void)
1581 if (IS_ERR(hba)) 1597 if (IS_ERR(hba))
1582 return PTR_ERR(hba); 1598 return PTR_ERR(hba);
1583 1599
1584 lun0_hba = hba; 1600 dev = target_alloc_device(hba, "virt_lun0");
1585 t = hba->transport; 1601 if (!dev) {
1586
1587 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1588 if (!se_dev) {
1589 pr_err("Unable to allocate memory for"
1590 " struct se_subsystem_dev\n");
1591 ret = -ENOMEM; 1602 ret = -ENOMEM;
1592 goto out; 1603 goto out_free_hba;
1593 } 1604 }
1594 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1595 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1596 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
1597 INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
1598 spin_lock_init(&se_dev->t10_pr.registration_lock);
1599 spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
1600 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
1601 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
1602 spin_lock_init(&se_dev->se_dev_lock);
1603 se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
1604 se_dev->t10_wwn.t10_sub_dev = se_dev;
1605 se_dev->t10_alua.t10_sub_dev = se_dev;
1606 se_dev->se_dev_attrib.da_sub_dev = se_dev;
1607 se_dev->se_dev_hba = hba;
1608
1609 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1610 if (!se_dev->se_dev_su_ptr) {
1611 pr_err("Unable to locate subsystem dependent pointer"
1612 " from allocate_virtdevice()\n");
1613 ret = -ENOMEM;
1614 goto out;
1615 }
1616 lun0_su_dev = se_dev;
1617 1605
1618 memset(buf, 0, 16); 1606 memset(buf, 0, 16);
1619 sprintf(buf, "rd_pages=8"); 1607 sprintf(buf, "rd_pages=8");
1620 t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); 1608 hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
1621 1609
1622 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); 1610 ret = target_configure_device(dev);
1623 if (IS_ERR(dev)) { 1611 if (ret)
1624 ret = PTR_ERR(dev); 1612 goto out_free_se_dev;
1625 goto out;
1626 }
1627 se_dev->se_dev_ptr = dev;
1628 g_lun0_dev = dev;
1629 1613
1614 lun0_hba = hba;
1615 g_lun0_dev = dev;
1630 return 0; 1616 return 0;
1631out: 1617
1632 lun0_su_dev = NULL; 1618out_free_se_dev:
1633 kfree(se_dev); 1619 target_free_device(dev);
1634 if (lun0_hba) { 1620out_free_hba:
1635 core_delete_hba(lun0_hba); 1621 core_delete_hba(hba);
1636 lun0_hba = NULL;
1637 }
1638 return ret; 1622 return ret;
1639} 1623}
1640 1624
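
core_dev_setup_virtual_lun0() above is the template for the new three-step lifecycle: allocate, feed parameters, configure, and unwind with target_free_device() on any failure. A hedged sketch of the same sequence for an arbitrary backend (the ex_ names and the ex_pages= option are hypothetical):

	static struct se_device *ex_setup(struct se_hba *hba)
	{
		struct se_device *dev;
		char buf[16];
		int ret;

		dev = target_alloc_device(hba, "ex_dev0");
		if (!dev)
			return ERR_PTR(-ENOMEM);

		memset(buf, 0, sizeof(buf));
		sprintf(buf, "ex_pages=8");
		hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

		ret = target_configure_device(dev);	/* claims the backing store */
		if (ret) {
			target_free_device(dev);
			return ERR_PTR(ret);
		}
		return dev;
	}

target_free_device() checks DF_CONFIGURED internally, so the error path does not need to track how far configuration got.
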
@@ -1642,14 +1626,11 @@ out:
1642void core_dev_release_virtual_lun0(void) 1626void core_dev_release_virtual_lun0(void)
1643{ 1627{
1644 struct se_hba *hba = lun0_hba; 1628 struct se_hba *hba = lun0_hba;
1645 struct se_subsystem_dev *su_dev = lun0_su_dev;
1646 1629
1647 if (!hba) 1630 if (!hba)
1648 return; 1631 return;
1649 1632
1650 if (g_lun0_dev) 1633 if (g_lun0_dev)
1651 se_free_virtual_device(g_lun0_dev, hba); 1634 target_free_device(g_lun0_dev);
1652
1653 kfree(su_dev);
1654 core_delete_hba(hba); 1635 core_delete_hba(hba);
1655} 1636}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index bca737bb813d..aa6731337f84 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -734,14 +734,12 @@ static int target_fabric_port_link(
734 struct config_item *se_dev_ci) 734 struct config_item *se_dev_ci)
735{ 735{
736 struct config_item *tpg_ci; 736 struct config_item *tpg_ci;
737 struct se_device *dev;
738 struct se_lun *lun = container_of(to_config_group(lun_ci), 737 struct se_lun *lun = container_of(to_config_group(lun_ci),
739 struct se_lun, lun_group); 738 struct se_lun, lun_group);
740 struct se_lun *lun_p; 739 struct se_lun *lun_p;
741 struct se_portal_group *se_tpg; 740 struct se_portal_group *se_tpg;
742 struct se_subsystem_dev *se_dev = container_of( 741 struct se_device *dev =
743 to_config_group(se_dev_ci), struct se_subsystem_dev, 742 container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
744 se_dev_group);
745 struct target_fabric_configfs *tf; 743 struct target_fabric_configfs *tf;
746 int ret; 744 int ret;
747 745
@@ -755,14 +753,6 @@ static int target_fabric_port_link(
755 return -EEXIST; 753 return -EEXIST;
756 } 754 }
757 755
758 dev = se_dev->se_dev_ptr;
759 if (!dev) {
760 pr_err("Unable to locate struct se_device pointer from"
761 " %s\n", config_item_name(se_dev_ci));
762 ret = -ENODEV;
763 goto out;
764 }
765
766 lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun); 756 lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
767 if (IS_ERR(lun_p)) { 757 if (IS_ERR(lun_p)) {
768 pr_err("core_dev_add_lun() failed\n"); 758 pr_err("core_dev_add_lun() failed\n");
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0360383dfb94..a89d80dacad2 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -41,7 +41,10 @@
41 41
42#include "target_core_file.h" 42#include "target_core_file.h"
43 43
44static struct se_subsystem_api fileio_template; 44static inline struct fd_dev *FD_DEV(struct se_device *dev)
45{
46 return container_of(dev, struct fd_dev, dev);
47}
45 48
46/* fd_attach_hba(): (Part of se_subsystem_api_t template) 49/* fd_attach_hba(): (Part of se_subsystem_api_t template)
47 * 50 *
@@ -82,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba)
82 hba->hba_ptr = NULL; 85 hba->hba_ptr = NULL;
83} 86}
84 87
85static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) 88static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
86{ 89{
87 struct fd_dev *fd_dev; 90 struct fd_dev *fd_dev;
88 struct fd_host *fd_host = hba->hba_ptr; 91 struct fd_host *fd_host = hba->hba_ptr;
@@ -97,34 +100,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
97 100
98 pr_debug("FILEIO: Allocated fd_dev for %p\n", name); 101 pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
99 102
100 return fd_dev; 103 return &fd_dev->dev;
101} 104}
102 105
103/* fd_create_virtdevice(): (Part of se_subsystem_api_t template) 106static int fd_configure_device(struct se_device *dev)
104 *
105 *
106 */
107static struct se_device *fd_create_virtdevice(
108 struct se_hba *hba,
109 struct se_subsystem_dev *se_dev,
110 void *p)
111{ 107{
112 struct se_device *dev; 108 struct fd_dev *fd_dev = FD_DEV(dev);
113 struct se_dev_limits dev_limits; 109 struct fd_host *fd_host = dev->se_hba->hba_ptr;
114 struct queue_limits *limits;
115 struct fd_dev *fd_dev = p;
116 struct fd_host *fd_host = hba->hba_ptr;
117 struct file *file; 110 struct file *file;
118 struct inode *inode = NULL; 111 struct inode *inode = NULL;
119 int dev_flags = 0, flags, ret = -EINVAL; 112 int flags, ret = -EINVAL;
120 113
121 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 114 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
115 pr_err("Missing fd_dev_name=\n");
116 return -EINVAL;
117 }
122 118
123 /* 119 /*
124 * Use O_DSYNC by default instead of O_SYNC to forgo syncing 120 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
125 * of pure timestamp updates. 121 * of pure timestamp updates.
126 */ 122 */
127 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; 123 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
124
128 /* 125 /*
129 * Optionally allow fd_buffered_io=1 to be enabled for people 126 * Optionally allow fd_buffered_io=1 to be enabled for people
 130 * who want to use the fs buffer cache as a WriteCache mechanism. 127 * who want to use the fs buffer cache as a WriteCache mechanism.
@@ -154,22 +151,17 @@ static struct se_device *fd_create_virtdevice(
154 */ 151 */
155 inode = file->f_mapping->host; 152 inode = file->f_mapping->host;
156 if (S_ISBLK(inode->i_mode)) { 153 if (S_ISBLK(inode->i_mode)) {
157 struct request_queue *q; 154 struct request_queue *q = bdev_get_queue(inode->i_bdev);
158 unsigned long long dev_size; 155 unsigned long long dev_size;
159 /* 156
160 * Setup the local scope queue_limits from struct request_queue->limits 157 dev->dev_attrib.hw_block_size =
161 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 158 bdev_logical_block_size(inode->i_bdev);
162 */ 159 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
163 q = bdev_get_queue(inode->i_bdev); 160
164 limits = &dev_limits.limits;
165 limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
166 limits->max_hw_sectors = queue_max_hw_sectors(q);
167 limits->max_sectors = queue_max_sectors(q);
168 /* 161 /*
169 * Determine the number of bytes from i_size_read() minus 162 * Determine the number of bytes from i_size_read() minus
170 * one (1) logical sector from underlying struct block_device 163 * one (1) logical sector from underlying struct block_device
171 */ 164 */
172 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
173 dev_size = (i_size_read(file->f_mapping->host) - 165 dev_size = (i_size_read(file->f_mapping->host) -
174 fd_dev->fd_block_size); 166 fd_dev->fd_block_size);
175 167
@@ -185,26 +177,18 @@ static struct se_device *fd_create_virtdevice(
185 goto fail; 177 goto fail;
186 } 178 }
187 179
188 limits = &dev_limits.limits; 180 dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
189 limits->logical_block_size = FD_BLOCKSIZE; 181 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
190 limits->max_hw_sectors = FD_MAX_SECTORS;
191 limits->max_sectors = FD_MAX_SECTORS;
192 fd_dev->fd_block_size = FD_BLOCKSIZE;
193 } 182 }
194 183
195 dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 184 fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
196 dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
197 185
198 dev = transport_add_device_to_core_hba(hba, &fileio_template, 186 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
199 se_dev, dev_flags, fd_dev,
200 &dev_limits, "FILEIO", FD_VERSION);
201 if (!dev)
202 goto fail;
203 187
204 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 188 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
205 pr_debug("FILEIO: Forcing setting of emulate_write_cache=1" 189 pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
206 " with FDBD_HAS_BUFFERED_IO_WCE\n"); 190 " with FDBD_HAS_BUFFERED_IO_WCE\n");
207 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1; 191 dev->dev_attrib.emulate_write_cache = 1;
208 } 192 }
209 193
210 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 194 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
@@ -214,22 +198,18 @@ static struct se_device *fd_create_virtdevice(
214 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, 198 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
215 fd_dev->fd_dev_name, fd_dev->fd_dev_size); 199 fd_dev->fd_dev_name, fd_dev->fd_dev_size);
216 200
217 return dev; 201 return 0;
218fail: 202fail:
219 if (fd_dev->fd_file) { 203 if (fd_dev->fd_file) {
220 filp_close(fd_dev->fd_file, NULL); 204 filp_close(fd_dev->fd_file, NULL);
221 fd_dev->fd_file = NULL; 205 fd_dev->fd_file = NULL;
222 } 206 }
223 return ERR_PTR(ret); 207 return ret;
224} 208}
225 209
226/* fd_free_device(): (Part of se_subsystem_api_t template) 210static void fd_free_device(struct se_device *dev)
227 *
228 *
229 */
230static void fd_free_device(void *p)
231{ 211{
232 struct fd_dev *fd_dev = p; 212 struct fd_dev *fd_dev = FD_DEV(dev);
233 213
234 if (fd_dev->fd_file) { 214 if (fd_dev->fd_file) {
235 filp_close(fd_dev->fd_file, NULL); 215 filp_close(fd_dev->fd_file, NULL);
@@ -243,13 +223,12 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
243 u32 sgl_nents) 223 u32 sgl_nents)
244{ 224{
245 struct se_device *se_dev = cmd->se_dev; 225 struct se_device *se_dev = cmd->se_dev;
246 struct fd_dev *dev = se_dev->dev_ptr; 226 struct fd_dev *dev = FD_DEV(se_dev);
247 struct file *fd = dev->fd_file; 227 struct file *fd = dev->fd_file;
248 struct scatterlist *sg; 228 struct scatterlist *sg;
249 struct iovec *iov; 229 struct iovec *iov;
250 mm_segment_t old_fs; 230 mm_segment_t old_fs;
251 loff_t pos = (cmd->t_task_lba * 231 loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
252 se_dev->se_sub_dev->se_dev_attrib.block_size);
253 int ret = 0, i; 232 int ret = 0, i;
254 233
255 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); 234 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
@@ -296,13 +275,12 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
296 u32 sgl_nents) 275 u32 sgl_nents)
297{ 276{
298 struct se_device *se_dev = cmd->se_dev; 277 struct se_device *se_dev = cmd->se_dev;
299 struct fd_dev *dev = se_dev->dev_ptr; 278 struct fd_dev *dev = FD_DEV(se_dev);
300 struct file *fd = dev->fd_file; 279 struct file *fd = dev->fd_file;
301 struct scatterlist *sg; 280 struct scatterlist *sg;
302 struct iovec *iov; 281 struct iovec *iov;
303 mm_segment_t old_fs; 282 mm_segment_t old_fs;
304 loff_t pos = (cmd->t_task_lba * 283 loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
305 se_dev->se_sub_dev->se_dev_attrib.block_size);
306 int ret, i = 0; 284 int ret, i = 0;
307 285
308 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); 286 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
@@ -334,7 +312,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
334static int fd_execute_sync_cache(struct se_cmd *cmd) 312static int fd_execute_sync_cache(struct se_cmd *cmd)
335{ 313{
336 struct se_device *dev = cmd->se_dev; 314 struct se_device *dev = cmd->se_dev;
337 struct fd_dev *fd_dev = dev->dev_ptr; 315 struct fd_dev *fd_dev = FD_DEV(dev);
338 int immed = (cmd->t_task_cdb[1] & 0x2); 316 int immed = (cmd->t_task_cdb[1] & 0x2);
339 loff_t start, end; 317 loff_t start, end;
340 int ret; 318 int ret;
@@ -353,7 +331,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
353 start = 0; 331 start = 0;
354 end = LLONG_MAX; 332 end = LLONG_MAX;
355 } else { 333 } else {
356 start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; 334 start = cmd->t_task_lba * dev->dev_attrib.block_size;
357 if (cmd->data_length) 335 if (cmd->data_length)
358 end = start + cmd->data_length; 336 end = start + cmd->data_length;
359 else 337 else
@@ -399,11 +377,11 @@ static int fd_execute_rw(struct se_cmd *cmd)
399 * Allow this to happen independent of WCE=0 setting. 377 * Allow this to happen independent of WCE=0 setting.
400 */ 378 */
401 if (ret > 0 && 379 if (ret > 0 &&
402 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 380 dev->dev_attrib.emulate_fua_write > 0 &&
403 (cmd->se_cmd_flags & SCF_FUA)) { 381 (cmd->se_cmd_flags & SCF_FUA)) {
404 struct fd_dev *fd_dev = dev->dev_ptr; 382 struct fd_dev *fd_dev = FD_DEV(dev);
405 loff_t start = cmd->t_task_lba * 383 loff_t start = cmd->t_task_lba *
406 dev->se_sub_dev->se_dev_attrib.block_size; 384 dev->dev_attrib.block_size;
407 loff_t end = start + cmd->data_length; 385 loff_t end = start + cmd->data_length;
408 386
409 vfs_fsync_range(fd_dev->fd_file, start, end, 1); 387 vfs_fsync_range(fd_dev->fd_file, start, end, 1);
@@ -430,12 +408,10 @@ static match_table_t tokens = {
430 {Opt_err, NULL} 408 {Opt_err, NULL}
431}; 409};
432 410
433static ssize_t fd_set_configfs_dev_params( 411static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
434 struct se_hba *hba, 412 const char *page, ssize_t count)
435 struct se_subsystem_dev *se_dev,
436 const char *page, ssize_t count)
437{ 413{
438 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 414 struct fd_dev *fd_dev = FD_DEV(dev);
439 char *orig, *ptr, *arg_p, *opts; 415 char *orig, *ptr, *arg_p, *opts;
440 substring_t args[MAX_OPT_ARGS]; 416 substring_t args[MAX_OPT_ARGS];
441 int ret = 0, arg, token; 417 int ret = 0, arg, token;
@@ -502,24 +478,9 @@ out:
502 return (!ret) ? count : ret; 478 return (!ret) ? count : ret;
503} 479}
504 480
505static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 481static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
506{
507 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
508
509 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
510 pr_err("Missing fd_dev_name=\n");
511 return -EINVAL;
512 }
513
514 return 0;
515}
516
517static ssize_t fd_show_configfs_dev_params(
518 struct se_hba *hba,
519 struct se_subsystem_dev *se_dev,
520 char *b)
521{ 482{
522 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 483 struct fd_dev *fd_dev = FD_DEV(dev);
523 ssize_t bl = 0; 484 ssize_t bl = 0;
524 485
525 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); 486 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
@@ -550,7 +511,7 @@ static u32 fd_get_device_type(struct se_device *dev)
550 511
551static sector_t fd_get_blocks(struct se_device *dev) 512static sector_t fd_get_blocks(struct se_device *dev)
552{ 513{
553 struct fd_dev *fd_dev = dev->dev_ptr; 514 struct fd_dev *fd_dev = FD_DEV(dev);
554 struct file *f = fd_dev->fd_file; 515 struct file *f = fd_dev->fd_file;
555 struct inode *i = f->f_mapping->host; 516 struct inode *i = f->f_mapping->host;
556 unsigned long long dev_size; 517 unsigned long long dev_size;
@@ -564,7 +525,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
564 else 525 else
565 dev_size = fd_dev->fd_dev_size; 526 dev_size = fd_dev->fd_dev_size;
566 527
567 return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); 528 return div_u64(dev_size, dev->dev_attrib.block_size);
568} 529}
569 530
570static struct spc_ops fd_spc_ops = { 531static struct spc_ops fd_spc_ops = {
@@ -579,15 +540,16 @@ static int fd_parse_cdb(struct se_cmd *cmd)
579 540
580static struct se_subsystem_api fileio_template = { 541static struct se_subsystem_api fileio_template = {
581 .name = "fileio", 542 .name = "fileio",
543 .inquiry_prod = "FILEIO",
544 .inquiry_rev = FD_VERSION,
582 .owner = THIS_MODULE, 545 .owner = THIS_MODULE,
583 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 546 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
584 .attach_hba = fd_attach_hba, 547 .attach_hba = fd_attach_hba,
585 .detach_hba = fd_detach_hba, 548 .detach_hba = fd_detach_hba,
586 .allocate_virtdevice = fd_allocate_virtdevice, 549 .alloc_device = fd_alloc_device,
587 .create_virtdevice = fd_create_virtdevice, 550 .configure_device = fd_configure_device,
588 .free_device = fd_free_device, 551 .free_device = fd_free_device,
589 .parse_cdb = fd_parse_cdb, 552 .parse_cdb = fd_parse_cdb,
590 .check_configfs_dev_params = fd_check_configfs_dev_params,
591 .set_configfs_dev_params = fd_set_configfs_dev_params, 553 .set_configfs_dev_params = fd_set_configfs_dev_params,
592 .show_configfs_dev_params = fd_show_configfs_dev_params, 554 .show_configfs_dev_params = fd_show_configfs_dev_params,
593 .get_device_rev = fd_get_device_rev, 555 .get_device_rev = fd_get_device_rev,
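
fd_get_blocks() above divides the usable byte size by the advertised block size; in the block-device case one logical block was already subtracted when the size was computed, so the division yields the last addressable LBA rather than a block count, matching what READ CAPACITY must report. A small worked sketch of the arithmetic (hypothetical helper, kernel div_u64() assumed):

	#include <linux/math64.h>

	static sector_t ex_last_lba(unsigned long long size_bytes, u32 block_size)
	{
		/*
		 * e.g. a 1 GiB backing store with 512-byte blocks:
		 * (1073741824 - 512) / 512 = 2097151, the LBA of the last block
		 */
		return div_u64(size_bytes - block_size, block_size);
	}
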
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 876ae53ef5b8..bc02b018ae46 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -17,6 +17,8 @@
17#define FDBD_HAS_BUFFERED_IO_WCE 0x04 17#define FDBD_HAS_BUFFERED_IO_WCE 0x04
18 18
19struct fd_dev { 19struct fd_dev {
20 struct se_device dev;
21
20 u32 fbd_flags; 22 u32 fbd_flags;
21 unsigned char fd_dev_name[FD_MAX_DEV_NAME]; 23 unsigned char fd_dev_name[FD_MAX_DEV_NAME];
22 /* Unique Ramdisk Device ID in Ramdisk HBA */ 24 /* Unique Ramdisk Device ID in Ramdisk HBA */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 3dd1bd4b6f71..048b7b4b9102 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -113,7 +113,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
113 return ERR_PTR(-ENOMEM); 113 return ERR_PTR(-ENOMEM);
114 } 114 }
115 115
116 INIT_LIST_HEAD(&hba->hba_dev_list);
117 spin_lock_init(&hba->device_lock); 116 spin_lock_init(&hba->device_lock);
118 mutex_init(&hba->hba_access_mutex); 117 mutex_init(&hba->hba_access_mutex);
119 118
@@ -152,8 +151,7 @@ out_free_hba:
152int 151int
153core_delete_hba(struct se_hba *hba) 152core_delete_hba(struct se_hba *hba)
154{ 153{
155 if (!list_empty(&hba->hba_dev_list)) 154 WARN_ON(hba->dev_count);
156 dump_stack();
157 155
158 hba->transport->detach_hba(hba); 156 hba->transport->detach_hba(hba);
159 157
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 57d7674c5013..dd6cd92cd9d8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -50,6 +50,12 @@
50#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */ 50#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
51#define IBLOCK_BIO_POOL_SIZE 128 51#define IBLOCK_BIO_POOL_SIZE 128
52 52
53static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
54{
55 return container_of(dev, struct iblock_dev, dev);
56}
57
58
53static struct se_subsystem_api iblock_template; 59static struct se_subsystem_api iblock_template;
54 60
55static void iblock_bio_done(struct bio *, int); 61static void iblock_bio_done(struct bio *, int);
@@ -70,7 +76,7 @@ static void iblock_detach_hba(struct se_hba *hba)
70{ 76{
71} 77}
72 78
73static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) 79static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
74{ 80{
75 struct iblock_dev *ib_dev = NULL; 81 struct iblock_dev *ib_dev = NULL;
76 82
@@ -82,40 +88,28 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
82 88
83 pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); 89 pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
84 90
85 return ib_dev; 91 return &ib_dev->dev;
86} 92}
87 93
88static struct se_device *iblock_create_virtdevice( 94static int iblock_configure_device(struct se_device *dev)
89 struct se_hba *hba,
90 struct se_subsystem_dev *se_dev,
91 void *p)
92{ 95{
93 struct iblock_dev *ib_dev = p; 96 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
94 struct se_device *dev;
95 struct se_dev_limits dev_limits;
96 struct block_device *bd = NULL;
97 struct request_queue *q; 97 struct request_queue *q;
98 struct queue_limits *limits; 98 struct block_device *bd = NULL;
99 u32 dev_flags = 0;
100 fmode_t mode; 99 fmode_t mode;
101 int ret = -EINVAL; 100 int ret = -ENOMEM;
102 101
103 if (!ib_dev) { 102 if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
104 pr_err("Unable to locate struct iblock_dev parameter\n"); 103 pr_err("Missing udev_path= parameters for IBLOCK\n");
105 return ERR_PTR(ret); 104 return -EINVAL;
106 } 105 }
107 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
108 106
109 ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0); 107 ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
110 if (!ib_dev->ibd_bio_set) { 108 if (!ib_dev->ibd_bio_set) {
111 pr_err("IBLOCK: Unable to create bioset()\n"); 109 pr_err("IBLOCK: Unable to create bioset\n");
112 return ERR_PTR(-ENOMEM); 110 goto out;
113 } 111 }
114 pr_debug("IBLOCK: Created bio_set()\n"); 112
115 /*
116 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
117 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
118 */
119 pr_debug( "IBLOCK: Claiming struct block_device: %s\n", 113 pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
120 ib_dev->ibd_udev_path); 114 ib_dev->ibd_udev_path);
121 115
@@ -126,27 +120,15 @@ static struct se_device *iblock_create_virtdevice(
126 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); 120 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
127 if (IS_ERR(bd)) { 121 if (IS_ERR(bd)) {
128 ret = PTR_ERR(bd); 122 ret = PTR_ERR(bd);
129 goto failed; 123 goto out_free_bioset;
130 } 124 }
131 /*
132 * Setup the local scope queue_limits from struct request_queue->limits
133 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
134 */
135 q = bdev_get_queue(bd);
136 limits = &dev_limits.limits;
137 limits->logical_block_size = bdev_logical_block_size(bd);
138 limits->max_hw_sectors = UINT_MAX;
139 limits->max_sectors = UINT_MAX;
140 dev_limits.hw_queue_depth = q->nr_requests;
141 dev_limits.queue_depth = q->nr_requests;
142
143 ib_dev->ibd_bd = bd; 125 ib_dev->ibd_bd = bd;
144 126
145 dev = transport_add_device_to_core_hba(hba, 127 q = bdev_get_queue(bd);
146 &iblock_template, se_dev, dev_flags, ib_dev, 128
147 &dev_limits, "IBLOCK", IBLOCK_VERSION); 129 dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
148 if (!dev) 130 dev->dev_attrib.hw_max_sectors = UINT_MAX;
149 goto failed; 131 dev->dev_attrib.hw_queue_depth = q->nr_requests;
150 132
151 /* 133 /*
152 * Check if the underlying struct block_device request_queue supports 134 * Check if the underlying struct block_device request_queue supports
@@ -154,15 +136,16 @@ static struct se_device *iblock_create_virtdevice(
154 * in ATA and we need to set TPE=1 136 * in ATA and we need to set TPE=1
155 */ 137 */
156 if (blk_queue_discard(q)) { 138 if (blk_queue_discard(q)) {
157 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = 139 dev->dev_attrib.max_unmap_lba_count =
158 q->limits.max_discard_sectors; 140 q->limits.max_discard_sectors;
141
159 /* 142 /*
160 * Currently hardcoded to 1 in Linux/SCSI code.. 143 * Currently hardcoded to 1 in Linux/SCSI code..
161 */ 144 */
162 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; 145 dev->dev_attrib.max_unmap_block_desc_count = 1;
163 dev->se_sub_dev->se_dev_attrib.unmap_granularity = 146 dev->dev_attrib.unmap_granularity =
164 q->limits.discard_granularity >> 9; 147 q->limits.discard_granularity >> 9;
165 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 148 dev->dev_attrib.unmap_granularity_alignment =
166 q->limits.discard_alignment; 149 q->limits.discard_alignment;
167 150
168 pr_debug("IBLOCK: BLOCK Discard support available," 151 pr_debug("IBLOCK: BLOCK Discard support available,"
@@ -170,22 +153,19 @@ static struct se_device *iblock_create_virtdevice(
170 } 153 }
171 154
172 if (blk_queue_nonrot(q)) 155 if (blk_queue_nonrot(q))
173 dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; 156 dev->dev_attrib.is_nonrot = 1;
174 157 return 0;
175 return dev;
176 158
177failed: 159out_free_bioset:
178 if (ib_dev->ibd_bio_set) { 160 bioset_free(ib_dev->ibd_bio_set);
179 bioset_free(ib_dev->ibd_bio_set); 161 ib_dev->ibd_bio_set = NULL;
180 ib_dev->ibd_bio_set = NULL; 162out:
181 } 163 return ret;
182 ib_dev->ibd_bd = NULL;
183 return ERR_PTR(ret);
184} 164}
185 165
186static void iblock_free_device(void *p) 166static void iblock_free_device(struct se_device *dev)
187{ 167{
188 struct iblock_dev *ib_dev = p; 168 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
189 169
190 if (ib_dev->ibd_bd != NULL) 170 if (ib_dev->ibd_bd != NULL)
191 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 171 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
@@ -203,12 +183,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
203 bdev_logical_block_size(bd)) - 1); 183 bdev_logical_block_size(bd)) - 1);
204 u32 block_size = bdev_logical_block_size(bd); 184 u32 block_size = bdev_logical_block_size(bd);
205 185
206 if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) 186 if (block_size == dev->dev_attrib.block_size)
207 return blocks_long; 187 return blocks_long;
208 188
209 switch (block_size) { 189 switch (block_size) {
210 case 4096: 190 case 4096:
211 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 191 switch (dev->dev_attrib.block_size) {
212 case 2048: 192 case 2048:
213 blocks_long <<= 1; 193 blocks_long <<= 1;
214 break; 194 break;
@@ -222,7 +202,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
222 } 202 }
223 break; 203 break;
224 case 2048: 204 case 2048:
225 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 205 switch (dev->dev_attrib.block_size) {
226 case 4096: 206 case 4096:
227 blocks_long >>= 1; 207 blocks_long >>= 1;
228 break; 208 break;
@@ -237,7 +217,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
237 } 217 }
238 break; 218 break;
239 case 1024: 219 case 1024:
240 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 220 switch (dev->dev_attrib.block_size) {
241 case 4096: 221 case 4096:
242 blocks_long >>= 2; 222 blocks_long >>= 2;
243 break; 223 break;
@@ -252,7 +232,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
252 } 232 }
253 break; 233 break;
254 case 512: 234 case 512:
255 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 235 switch (dev->dev_attrib.block_size) {
256 case 4096: 236 case 4096:
257 blocks_long >>= 3; 237 blocks_long >>= 3;
258 break; 238 break;
@@ -299,7 +279,7 @@ static void iblock_end_io_flush(struct bio *bio, int err)
299 */ 279 */
300static int iblock_execute_sync_cache(struct se_cmd *cmd) 280static int iblock_execute_sync_cache(struct se_cmd *cmd)
301{ 281{
302 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 282 struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
303 int immed = (cmd->t_task_cdb[1] & 0x2); 283 int immed = (cmd->t_task_cdb[1] & 0x2);
304 struct bio *bio; 284 struct bio *bio;
305 285
@@ -322,7 +302,7 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd)
322static int iblock_execute_unmap(struct se_cmd *cmd) 302static int iblock_execute_unmap(struct se_cmd *cmd)
323{ 303{
324 struct se_device *dev = cmd->se_dev; 304 struct se_device *dev = cmd->se_dev;
325 struct iblock_dev *ibd = dev->dev_ptr; 305 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
326 unsigned char *buf, *ptr = NULL; 306 unsigned char *buf, *ptr = NULL;
327 sector_t lba; 307 sector_t lba;
328 int size; 308 int size;
@@ -349,7 +329,7 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
349 else 329 else
350 size = bd_dl; 330 size = bd_dl;
351 331
352 if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 332 if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
353 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 333 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
354 ret = -EINVAL; 334 ret = -EINVAL;
355 goto err; 335 goto err;
@@ -366,7 +346,7 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
366 pr_debug("UNMAP: Using lba: %llu and range: %u\n", 346 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
367 (unsigned long long)lba, range); 347 (unsigned long long)lba, range);
368 348
369 if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) { 349 if (range > dev->dev_attrib.max_unmap_lba_count) {
370 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 350 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
371 ret = -EINVAL; 351 ret = -EINVAL;
372 goto err; 352 goto err;
@@ -378,7 +358,7 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
378 goto err; 358 goto err;
379 } 359 }
380 360
381 ret = blkdev_issue_discard(ibd->ibd_bd, lba, range, 361 ret = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
382 GFP_KERNEL, 0); 362 GFP_KERNEL, 0);
383 if (ret < 0) { 363 if (ret < 0) {
384 pr_err("blkdev_issue_discard() failed: %d\n", 364 pr_err("blkdev_issue_discard() failed: %d\n",
@@ -399,10 +379,10 @@ err:
399 379
400static int iblock_execute_write_same(struct se_cmd *cmd) 380static int iblock_execute_write_same(struct se_cmd *cmd)
401{ 381{
402 struct iblock_dev *ibd = cmd->se_dev->dev_ptr; 382 struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
403 int ret; 383 int ret;
404 384
405 ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba, 385 ret = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
406 spc_get_write_same_sectors(cmd), GFP_KERNEL, 386 spc_get_write_same_sectors(cmd), GFP_KERNEL,
407 0); 387 0);
408 if (ret < 0) { 388 if (ret < 0) {
@@ -425,11 +405,10 @@ static match_table_t tokens = {
425 {Opt_err, NULL} 405 {Opt_err, NULL}
426}; 406};
427 407
428static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, 408static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
429 struct se_subsystem_dev *se_dev, 409 const char *page, ssize_t count)
430 const char *page, ssize_t count)
431{ 410{
432 struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; 411 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
433 char *orig, *ptr, *arg_p, *opts; 412 char *orig, *ptr, *arg_p, *opts;
434 substring_t args[MAX_OPT_ARGS]; 413 substring_t args[MAX_OPT_ARGS];
435 int ret = 0, token; 414 int ret = 0, token;
@@ -491,43 +470,26 @@ out:
491 return (!ret) ? count : ret; 470 return (!ret) ? count : ret;
492} 471}
493 472
494static ssize_t iblock_check_configfs_dev_params( 473static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
495 struct se_hba *hba,
496 struct se_subsystem_dev *se_dev)
497{
498 struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
499
500 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
501 pr_err("Missing udev_path= parameters for IBLOCK\n");
502 return -EINVAL;
503 }
504
505 return 0;
506}
507
508static ssize_t iblock_show_configfs_dev_params(
509 struct se_hba *hba,
510 struct se_subsystem_dev *se_dev,
511 char *b)
512{ 474{
513 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 475 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
514 struct block_device *bd = ibd->ibd_bd; 476 struct block_device *bd = ib_dev->ibd_bd;
515 char buf[BDEVNAME_SIZE]; 477 char buf[BDEVNAME_SIZE];
516 ssize_t bl = 0; 478 ssize_t bl = 0;
517 479
518 if (bd) 480 if (bd)
519 bl += sprintf(b + bl, "iBlock device: %s", 481 bl += sprintf(b + bl, "iBlock device: %s",
520 bdevname(bd, buf)); 482 bdevname(bd, buf));
521 if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) 483 if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
522 bl += sprintf(b + bl, " UDEV PATH: %s", 484 bl += sprintf(b + bl, " UDEV PATH: %s",
523 ibd->ibd_udev_path); 485 ib_dev->ibd_udev_path);
524 bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly); 486 bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
525 487
526 bl += sprintf(b + bl, " "); 488 bl += sprintf(b + bl, " ");
527 if (bd) { 489 if (bd) {
528 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", 490 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
529 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? 491 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
530 "" : (bd->bd_holder == ibd) ? 492 "" : (bd->bd_holder == ib_dev) ?
531 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 493 "CLAIMED: IBLOCK" : "CLAIMED: OS");
532 } else { 494 } else {
533 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); 495 bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -556,7 +518,7 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
556static struct bio * 518static struct bio *
557iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) 519iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
558{ 520{
559 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 521 struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
560 struct bio *bio; 522 struct bio *bio;
561 523
562 /* 524 /*
@@ -611,8 +573,8 @@ static int iblock_execute_rw(struct se_cmd *cmd)
611 * Force data to disk if we pretend to not have a volatile 573 * Force data to disk if we pretend to not have a volatile
612 * write cache, or the initiator set the Force Unit Access bit. 574 * write cache, or the initiator set the Force Unit Access bit.
613 */ 575 */
614 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || 576 if (dev->dev_attrib.emulate_write_cache == 0 ||
615 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 577 (dev->dev_attrib.emulate_fua_write > 0 &&
616 (cmd->se_cmd_flags & SCF_FUA))) 578 (cmd->se_cmd_flags & SCF_FUA)))
617 rw = WRITE_FUA; 579 rw = WRITE_FUA;
618 else 580 else
@@ -625,17 +587,17 @@ static int iblock_execute_rw(struct se_cmd *cmd)
625 * Convert the blocksize advertised to the initiator to the 512 byte 587 * Convert the blocksize advertised to the initiator to the 512 byte
626 * units unconditionally used by the Linux block layer. 588 * units unconditionally used by the Linux block layer.
627 */ 589 */
628 if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) 590 if (dev->dev_attrib.block_size == 4096)
629 block_lba = (cmd->t_task_lba << 3); 591 block_lba = (cmd->t_task_lba << 3);
630 else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) 592 else if (dev->dev_attrib.block_size == 2048)
631 block_lba = (cmd->t_task_lba << 2); 593 block_lba = (cmd->t_task_lba << 2);
632 else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) 594 else if (dev->dev_attrib.block_size == 1024)
633 block_lba = (cmd->t_task_lba << 1); 595 block_lba = (cmd->t_task_lba << 1);
634 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) 596 else if (dev->dev_attrib.block_size == 512)
635 block_lba = cmd->t_task_lba; 597 block_lba = cmd->t_task_lba;
636 else { 598 else {
637 pr_err("Unsupported SCSI -> BLOCK LBA conversion:" 599 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
638 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 600 " %u\n", dev->dev_attrib.block_size);
639 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 601 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
640 return -ENOSYS; 602 return -ENOSYS;
641 } 603 }
@@ -714,8 +676,8 @@ static u32 iblock_get_device_type(struct se_device *dev)
714 676
715static sector_t iblock_get_blocks(struct se_device *dev) 677static sector_t iblock_get_blocks(struct se_device *dev)
716{ 678{
717 struct iblock_dev *ibd = dev->dev_ptr; 679 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
718 struct block_device *bd = ibd->ibd_bd; 680 struct block_device *bd = ib_dev->ibd_bd;
719 struct request_queue *q = bdev_get_queue(bd); 681 struct request_queue *q = bdev_get_queue(bd);
720 682
721 return iblock_emulate_read_cap_with_block_size(dev, bd, q); 683 return iblock_emulate_read_cap_with_block_size(dev, bd, q);
@@ -761,15 +723,16 @@ static int iblock_parse_cdb(struct se_cmd *cmd)
761 723
762static struct se_subsystem_api iblock_template = { 724static struct se_subsystem_api iblock_template = {
763 .name = "iblock", 725 .name = "iblock",
726 .inquiry_prod = "IBLOCK",
727 .inquiry_rev = IBLOCK_VERSION,
764 .owner = THIS_MODULE, 728 .owner = THIS_MODULE,
765 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 729 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
766 .attach_hba = iblock_attach_hba, 730 .attach_hba = iblock_attach_hba,
767 .detach_hba = iblock_detach_hba, 731 .detach_hba = iblock_detach_hba,
768 .allocate_virtdevice = iblock_allocate_virtdevice, 732 .alloc_device = iblock_alloc_device,
769 .create_virtdevice = iblock_create_virtdevice, 733 .configure_device = iblock_configure_device,
770 .free_device = iblock_free_device, 734 .free_device = iblock_free_device,
771 .parse_cdb = iblock_parse_cdb, 735 .parse_cdb = iblock_parse_cdb,
772 .check_configfs_dev_params = iblock_check_configfs_dev_params,
773 .set_configfs_dev_params = iblock_set_configfs_dev_params, 736 .set_configfs_dev_params = iblock_set_configfs_dev_params,
774 .show_configfs_dev_params = iblock_show_configfs_dev_params, 737 .show_configfs_dev_params = iblock_show_configfs_dev_params,
775 .get_device_rev = iblock_get_device_rev, 738 .get_device_rev = iblock_get_device_rev,
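
The block_size ladder in iblock_execute_rw() above converts initiator LBAs into the 512-byte sector units the Linux block layer counts in; every supported size is a power of two, so each case is a left shift (equivalently, lba << (ilog2(block_size) - 9)). A hedged sketch of the same mapping as a standalone helper (name hypothetical):

	static int ex_lba_to_512(u32 block_size, unsigned long long lba,
				 unsigned long long *out)
	{
		switch (block_size) {
		case 4096: *out = lba << 3; break;	/* 8 sectors per block */
		case 2048: *out = lba << 2; break;	/* 4 sectors per block */
		case 1024: *out = lba << 1; break;	/* 2 sectors per block */
		case 512:  *out = lba;      break;	/* already in sectors */
		default:   return -ENOSYS;	/* rejected, as in the driver */
		}
		return 0;
	}
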
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 533627ae79ec..01c2afd81500 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -14,6 +14,7 @@ struct iblock_req {
14#define IBDF_HAS_UDEV_PATH 0x01 14#define IBDF_HAS_UDEV_PATH 0x01
15 15
16struct iblock_dev { 16struct iblock_dev {
17 struct se_device dev;
17 unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; 18 unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
18 u32 ibd_flags; 19 u32 ibd_flags;
19 struct bio_set *ibd_bio_set; 20 struct bio_set *ibd_bio_set;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 0fd428225d11..5854ed67af59 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -20,12 +20,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
20void core_dev_unexport(struct se_device *, struct se_portal_group *, 20void core_dev_unexport(struct se_device *, struct se_portal_group *,
21 struct se_lun *); 21 struct se_lun *);
22int target_report_luns(struct se_cmd *); 22int target_report_luns(struct se_cmd *);
23void se_release_device_for_hba(struct se_device *);
24void se_release_vpd_for_dev(struct se_device *);
25int se_free_virtual_device(struct se_device *, struct se_hba *);
26int se_dev_check_online(struct se_device *);
27int se_dev_check_shutdown(struct se_device *);
28void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
29int se_dev_set_task_timeout(struct se_device *, u32); 23int se_dev_set_task_timeout(struct se_device *, u32);
30int se_dev_set_max_unmap_lba_count(struct se_device *, u32); 24int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
31int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); 25int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
@@ -60,6 +54,9 @@ void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
60 struct se_lun_acl *lacl); 54 struct se_lun_acl *lacl);
61int core_dev_setup_virtual_lun0(void); 55int core_dev_setup_virtual_lun0(void);
62void core_dev_release_virtual_lun0(void); 56void core_dev_release_virtual_lun0(void);
57struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
58int target_configure_device(struct se_device *dev);
59void target_free_device(struct se_device *);
63 60
64/* target_core_hba.c */ 61/* target_core_hba.c */
65struct se_hba *core_alloc_hba(const char *, u32, u32); 62struct se_hba *core_alloc_hba(const char *, u32, u32);
@@ -106,9 +103,10 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
106int transport_clear_lun_from_sessions(struct se_lun *); 103int transport_clear_lun_from_sessions(struct se_lun *);
107void transport_send_task_abort(struct se_cmd *); 104void transport_send_task_abort(struct se_cmd *);
108int target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 105int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
106void target_qf_do_work(struct work_struct *work);
109 107
110/* target_core_stat.c */ 108/* target_core_stat.c */
111void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); 109void target_stat_setup_dev_default_groups(struct se_device *);
112void target_stat_setup_port_default_groups(struct se_lun *); 110void target_stat_setup_port_default_groups(struct se_lun *);
113void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); 111void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
114 112
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 8c323a98c4a0..2b289891672f 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -103,7 +103,7 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
103 spin_unlock(&dev->dev_reservation_lock); 103 spin_unlock(&dev->dev_reservation_lock);
104 return -EINVAL; 104 return -EINVAL;
105 } 105 }
106 if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { 106 if (!(dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID)) {
107 spin_unlock(&dev->dev_reservation_lock); 107 spin_unlock(&dev->dev_reservation_lock);
108 return 0; 108 return 0;
109 } 109 }
@@ -120,10 +120,10 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
120static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd) 120static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
121{ 121{
122 struct se_session *se_sess = cmd->se_sess; 122 struct se_session *se_sess = cmd->se_sess;
123 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 123 struct se_device *dev = cmd->se_dev;
124 struct t10_pr_registration *pr_reg; 124 struct t10_pr_registration *pr_reg;
125 struct t10_reservation *pr_tmpl = &su_dev->t10_pr; 125 struct t10_reservation *pr_tmpl = &dev->t10_pr;
126 int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); 126 int crh = (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
127 int conflict = 0; 127 int conflict = 0;
128 128
129 if (!crh) 129 if (!crh)
@@ -223,10 +223,10 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
223 goto out_unlock; 223 goto out_unlock;
224 224
225 dev->dev_reserved_node_acl = NULL; 225 dev->dev_reserved_node_acl = NULL;
226 dev->dev_flags &= ~DF_SPC2_RESERVATIONS; 226 dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
227 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) { 227 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
228 dev->dev_res_bin_isid = 0; 228 dev->dev_res_bin_isid = 0;
229 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; 229 dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
230 } 230 }
231 tpg = sess->se_tpg; 231 tpg = sess->se_tpg;
232 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" 232 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
@@ -292,10 +292,10 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
292 } 292 }
293 293
294 dev->dev_reserved_node_acl = sess->se_node_acl; 294 dev->dev_reserved_node_acl = sess->se_node_acl;
295 dev->dev_flags |= DF_SPC2_RESERVATIONS; 295 dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
296 if (sess->sess_bin_isid != 0) { 296 if (sess->sess_bin_isid != 0) {
297 dev->dev_res_bin_isid = sess->sess_bin_isid; 297 dev->dev_res_bin_isid = sess->sess_bin_isid;
298 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; 298 dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
299 } 299 }
300 pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" 300 pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
301 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 301 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -333,7 +333,7 @@ static int core_scsi3_pr_seq_non_holder(
333 /* 333 /*
334 * A legacy SPC-2 reservation is being held. 334 * A legacy SPC-2 reservation is being held.
335 */ 335 */
336 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) 336 if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
337 return core_scsi2_reservation_seq_non_holder(cmd, 337 return core_scsi2_reservation_seq_non_holder(cmd,
338 cdb, pr_reg_type); 338 cdb, pr_reg_type);
339 339
@@ -565,8 +565,8 @@ static int core_scsi3_pr_seq_non_holder(
565 565
566static u32 core_scsi3_pr_generation(struct se_device *dev) 566static u32 core_scsi3_pr_generation(struct se_device *dev)
567{ 567{
568 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
569 u32 prg; 568 u32 prg;
569
570 /* 570 /*
571 * PRGeneration field shall contain the value of a 32-bit wrapping 571 * PRGeneration field shall contain the value of a 32-bit wrapping
 572 * counter maintained by the device server. 572 * counter maintained by the device server.
@@ -577,7 +577,7 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
577 * See spc4r17 section 6.3.12 READ_KEYS service action 577 * See spc4r17 section 6.3.12 READ_KEYS service action
578 */ 578 */
579 spin_lock(&dev->dev_reservation_lock); 579 spin_lock(&dev->dev_reservation_lock);
580 prg = su_dev->t10_pr.pr_generation++; 580 prg = dev->t10_pr.pr_generation++;
581 spin_unlock(&dev->dev_reservation_lock); 581 spin_unlock(&dev->dev_reservation_lock);
582 582
583 return prg; 583 return prg;
@@ -596,7 +596,7 @@ static int core_scsi3_pr_reservation_check(
596 /* 596 /*
597 * A legacy SPC-2 reservation is being held. 597 * A legacy SPC-2 reservation is being held.
598 */ 598 */
599 if (dev->dev_flags & DF_SPC2_RESERVATIONS) 599 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
600 return core_scsi2_reservation_check(cmd, pr_reg_type); 600 return core_scsi2_reservation_check(cmd, pr_reg_type);
601 601
602 spin_lock(&dev->dev_reservation_lock); 602 spin_lock(&dev->dev_reservation_lock);
@@ -636,7 +636,6 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
636 int all_tg_pt, 636 int all_tg_pt,
637 int aptpl) 637 int aptpl)
638{ 638{
639 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
640 struct t10_pr_registration *pr_reg; 639 struct t10_pr_registration *pr_reg;
641 640
642 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); 641 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
@@ -645,7 +644,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
645 return NULL; 644 return NULL;
646 } 645 }
647 646
648 pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, 647 pr_reg->pr_aptpl_buf = kzalloc(dev->t10_pr.pr_aptpl_buf_len,
649 GFP_ATOMIC); 648 GFP_ATOMIC);
650 if (!pr_reg->pr_aptpl_buf) { 649 if (!pr_reg->pr_aptpl_buf) {
651 pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n"); 650 pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
@@ -929,7 +928,7 @@ static int __core_scsi3_check_aptpl_registration(
929 struct se_dev_entry *deve) 928 struct se_dev_entry *deve)
930{ 929{
931 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 930 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
932 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 931 struct t10_reservation *pr_tmpl = &dev->t10_pr;
933 unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; 932 unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
934 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; 933 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
935 u16 tpgt; 934 u16 tpgt;
@@ -996,11 +995,10 @@ int core_scsi3_check_aptpl_registration(
996 struct se_lun *lun, 995 struct se_lun *lun,
997 struct se_lun_acl *lun_acl) 996 struct se_lun_acl *lun_acl)
998{ 997{
999 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1000 struct se_node_acl *nacl = lun_acl->se_lun_nacl; 998 struct se_node_acl *nacl = lun_acl->se_lun_nacl;
1001 struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun]; 999 struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
1002 1000
1003 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1001 if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1004 return 0; 1002 return 0;
1005 1003
1006 return __core_scsi3_check_aptpl_registration(dev, tpg, lun, 1004 return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
@@ -1051,10 +1049,9 @@ static void __core_scsi3_add_registration(
1051 int register_type, 1049 int register_type,
1052 int register_move) 1050 int register_move)
1053{ 1051{
1054 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1055 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 1052 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
1056 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; 1053 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
1057 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1054 struct t10_reservation *pr_tmpl = &dev->t10_pr;
1058 1055
1059 /* 1056 /*
1060 * Increment PRgeneration counter for struct se_device upon a successful 1057 * Increment PRgeneration counter for struct se_device upon a successful
@@ -1066,7 +1063,7 @@ static void __core_scsi3_add_registration(
1066 * for the REGISTER. 1063 * for the REGISTER.
1067 */ 1064 */
1068 pr_reg->pr_res_generation = (register_move) ? 1065 pr_reg->pr_res_generation = (register_move) ?
1069 su_dev->t10_pr.pr_generation++ : 1066 dev->t10_pr.pr_generation++ :
1070 core_scsi3_pr_generation(dev); 1067 core_scsi3_pr_generation(dev);
1071 1068
1072 spin_lock(&pr_tmpl->registration_lock); 1069 spin_lock(&pr_tmpl->registration_lock);
@@ -1135,7 +1132,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1135 struct se_node_acl *nacl, 1132 struct se_node_acl *nacl,
1136 unsigned char *isid) 1133 unsigned char *isid)
1137{ 1134{
1138 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1135 struct t10_reservation *pr_tmpl = &dev->t10_pr;
1139 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 1136 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
1140 struct se_portal_group *tpg; 1137 struct se_portal_group *tpg;
1141 1138
@@ -1160,7 +1157,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1160 * for fabric modules (iSCSI) requiring them. 1157 * for fabric modules (iSCSI) requiring them.
1161 */ 1158 */
1162 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 1159 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
1163 if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) 1160 if (dev->dev_attrib.enforce_pr_isids)
1164 continue; 1161 continue;
1165 } 1162 }
1166 atomic_inc(&pr_reg->pr_res_holders); 1163 atomic_inc(&pr_reg->pr_res_holders);
@@ -1274,7 +1271,7 @@ static void __core_scsi3_free_registration(
1274{ 1271{
1275 struct target_core_fabric_ops *tfo = 1272 struct target_core_fabric_ops *tfo =
1276 pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; 1273 pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
1277 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1274 struct t10_reservation *pr_tmpl = &dev->t10_pr;
1278 char i_buf[PR_REG_ISID_ID_LEN]; 1275 char i_buf[PR_REG_ISID_ID_LEN];
1279 int prf_isid; 1276 int prf_isid;
1280 1277
@@ -1335,7 +1332,7 @@ void core_scsi3_free_pr_reg_from_nacl(
1335 struct se_device *dev, 1332 struct se_device *dev,
1336 struct se_node_acl *nacl) 1333 struct se_node_acl *nacl)
1337{ 1334{
1338 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1335 struct t10_reservation *pr_tmpl = &dev->t10_pr;
1339 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; 1336 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1340 /* 1337 /*
1341 * If the passed se_node_acl matches the reservation holder, 1338 * If the passed se_node_acl matches the reservation holder,
@@ -1365,7 +1362,7 @@ void core_scsi3_free_pr_reg_from_nacl(
1365void core_scsi3_free_all_registrations( 1362void core_scsi3_free_all_registrations(
1366 struct se_device *dev) 1363 struct se_device *dev)
1367{ 1364{
1368 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1365 struct t10_reservation *pr_tmpl = &dev->t10_pr;
1369 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; 1366 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1370 1367
1371 spin_lock(&dev->dev_reservation_lock); 1368 spin_lock(&dev->dev_reservation_lock);
@@ -1899,7 +1896,6 @@ static int __core_scsi3_update_aptpl_buf(
1899{ 1896{
1900 struct se_lun *lun; 1897 struct se_lun *lun;
1901 struct se_portal_group *tpg; 1898 struct se_portal_group *tpg;
1902 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1903 struct t10_pr_registration *pr_reg; 1899 struct t10_pr_registration *pr_reg;
1904 unsigned char tmp[512], isid_buf[32]; 1900 unsigned char tmp[512], isid_buf[32];
1905 ssize_t len = 0; 1901 ssize_t len = 0;
@@ -1917,8 +1913,8 @@ static int __core_scsi3_update_aptpl_buf(
1917 /* 1913 /*
 1918	 * Walk the registration list. 1914	 * Walk the registration list.
1919 */ 1915 */
1920 spin_lock(&su_dev->t10_pr.registration_lock); 1916 spin_lock(&dev->t10_pr.registration_lock);
1921 list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, 1917 list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
1922 pr_reg_list) { 1918 pr_reg_list) {
1923 1919
1924 tmp[0] = '\0'; 1920 tmp[0] = '\0';
@@ -1963,7 +1959,7 @@ static int __core_scsi3_update_aptpl_buf(
1963 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1959 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1964 pr_err("Unable to update renaming" 1960 pr_err("Unable to update renaming"
1965 " APTPL metadata\n"); 1961 " APTPL metadata\n");
1966 spin_unlock(&su_dev->t10_pr.registration_lock); 1962 spin_unlock(&dev->t10_pr.registration_lock);
1967 return -EMSGSIZE; 1963 return -EMSGSIZE;
1968 } 1964 }
1969 len += sprintf(buf+len, "%s", tmp); 1965 len += sprintf(buf+len, "%s", tmp);
@@ -1981,13 +1977,13 @@ static int __core_scsi3_update_aptpl_buf(
1981 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1977 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1982 pr_err("Unable to update renaming" 1978 pr_err("Unable to update renaming"
1983 " APTPL metadata\n"); 1979 " APTPL metadata\n");
1984 spin_unlock(&su_dev->t10_pr.registration_lock); 1980 spin_unlock(&dev->t10_pr.registration_lock);
1985 return -EMSGSIZE; 1981 return -EMSGSIZE;
1986 } 1982 }
1987 len += sprintf(buf+len, "%s", tmp); 1983 len += sprintf(buf+len, "%s", tmp);
1988 reg_count++; 1984 reg_count++;
1989 } 1985 }
1990 spin_unlock(&su_dev->t10_pr.registration_lock); 1986 spin_unlock(&dev->t10_pr.registration_lock);
1991 1987
1992 if (!reg_count) 1988 if (!reg_count)
1993 len += sprintf(buf+len, "No Registrations or Reservations"); 1989 len += sprintf(buf+len, "No Registrations or Reservations");
@@ -2019,7 +2015,7 @@ static int __core_scsi3_write_aptpl_to_file(
2019 unsigned char *buf, 2015 unsigned char *buf,
2020 u32 pr_aptpl_buf_len) 2016 u32 pr_aptpl_buf_len)
2021{ 2017{
2022 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; 2018 struct t10_wwn *wwn = &dev->t10_wwn;
2023 struct file *file; 2019 struct file *file;
2024 struct iovec iov[1]; 2020 struct iovec iov[1];
2025 mm_segment_t old_fs; 2021 mm_segment_t old_fs;
@@ -2120,7 +2116,7 @@ static int core_scsi3_emulate_pro_register(
2120 struct se_lun *se_lun = cmd->se_lun; 2116 struct se_lun *se_lun = cmd->se_lun;
2121 struct se_portal_group *se_tpg; 2117 struct se_portal_group *se_tpg;
2122 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; 2118 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
2123 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2119 struct t10_reservation *pr_tmpl = &dev->t10_pr;
2124 /* Used for APTPL metadata w/ UNREGISTER */ 2120 /* Used for APTPL metadata w/ UNREGISTER */
2125 unsigned char *pr_aptpl_buf = NULL; 2121 unsigned char *pr_aptpl_buf = NULL;
2126 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; 2122 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
@@ -2434,7 +2430,7 @@ static int core_scsi3_pro_reserve(
2434 struct se_session *se_sess = cmd->se_sess; 2430 struct se_session *se_sess = cmd->se_sess;
2435 struct se_lun *se_lun = cmd->se_lun; 2431 struct se_lun *se_lun = cmd->se_lun;
2436 struct t10_pr_registration *pr_reg, *pr_res_holder; 2432 struct t10_pr_registration *pr_reg, *pr_res_holder;
2437 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2433 struct t10_reservation *pr_tmpl = &dev->t10_pr;
2438 char i_buf[PR_REG_ISID_ID_LEN]; 2434 char i_buf[PR_REG_ISID_ID_LEN];
2439 int ret, prf_isid; 2435 int ret, prf_isid;
2440 2436
@@ -2667,7 +2663,7 @@ static int core_scsi3_emulate_pro_release(
2667 struct se_session *se_sess = cmd->se_sess; 2663 struct se_session *se_sess = cmd->se_sess;
2668 struct se_lun *se_lun = cmd->se_lun; 2664 struct se_lun *se_lun = cmd->se_lun;
2669 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; 2665 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
2670 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2666 struct t10_reservation *pr_tmpl = &dev->t10_pr;
2671 int ret, all_reg = 0; 2667 int ret, all_reg = 0;
2672 2668
2673 if (!se_sess || !se_lun) { 2669 if (!se_sess || !se_lun) {
@@ -2836,7 +2832,7 @@ static int core_scsi3_emulate_pro_clear(
2836 struct se_device *dev = cmd->se_dev; 2832 struct se_device *dev = cmd->se_dev;
2837 struct se_node_acl *pr_reg_nacl; 2833 struct se_node_acl *pr_reg_nacl;
2838 struct se_session *se_sess = cmd->se_sess; 2834 struct se_session *se_sess = cmd->se_sess;
2839 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2835 struct t10_reservation *pr_tmpl = &dev->t10_pr;
2840 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; 2836 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
2841 u32 pr_res_mapped_lun = 0; 2837 u32 pr_res_mapped_lun = 0;
2842 int calling_it_nexus = 0; 2838 int calling_it_nexus = 0;
@@ -3006,7 +3002,7 @@ static int core_scsi3_pro_preempt(
3006 struct se_session *se_sess = cmd->se_sess; 3002 struct se_session *se_sess = cmd->se_sess;
3007 LIST_HEAD(preempt_and_abort_list); 3003 LIST_HEAD(preempt_and_abort_list);
3008 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; 3004 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
3009 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 3005 struct t10_reservation *pr_tmpl = &dev->t10_pr;
3010 u32 pr_res_mapped_lun = 0; 3006 u32 pr_res_mapped_lun = 0;
3011 int all_reg = 0, calling_it_nexus = 0, released_regs = 0; 3007 int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
3012 int prh_type = 0, prh_scope = 0, ret; 3008 int prh_type = 0, prh_scope = 0, ret;
@@ -3358,7 +3354,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3358 struct se_portal_group *se_tpg, *dest_se_tpg = NULL; 3354 struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
3359 struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; 3355 struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
3360 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; 3356 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
3361 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 3357 struct t10_reservation *pr_tmpl = &dev->t10_pr;
3362 unsigned char *buf; 3358 unsigned char *buf;
3363 unsigned char *initiator_str; 3359 unsigned char *initiator_str;
3364 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; 3360 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
@@ -3823,7 +3819,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3823 * initiator or service action and shall terminate with a RESERVATION 3819 * initiator or service action and shall terminate with a RESERVATION
3824 * CONFLICT status. 3820 * CONFLICT status.
3825 */ 3821 */
3826 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) { 3822 if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
3827 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 3823 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
3828 " SPC-2 reservation is held, returning" 3824 " SPC-2 reservation is held, returning"
3829 " RESERVATION_CONFLICT\n"); 3825 " RESERVATION_CONFLICT\n");
@@ -3959,8 +3955,7 @@ out:
3959 */ 3955 */
3960static int core_scsi3_pri_read_keys(struct se_cmd *cmd) 3956static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3961{ 3957{
3962 struct se_device *se_dev = cmd->se_dev; 3958 struct se_device *dev = cmd->se_dev;
3963 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
3964 struct t10_pr_registration *pr_reg; 3959 struct t10_pr_registration *pr_reg;
3965 unsigned char *buf; 3960 unsigned char *buf;
3966 u32 add_len = 0, off = 8; 3961 u32 add_len = 0, off = 8;
@@ -3973,13 +3968,13 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3973 } 3968 }
3974 3969
3975 buf = transport_kmap_data_sg(cmd); 3970 buf = transport_kmap_data_sg(cmd);
3976 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 3971 buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
3977 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 3972 buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
3978 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 3973 buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
3979 buf[3] = (su_dev->t10_pr.pr_generation & 0xff); 3974 buf[3] = (dev->t10_pr.pr_generation & 0xff);
3980 3975
3981 spin_lock(&su_dev->t10_pr.registration_lock); 3976 spin_lock(&dev->t10_pr.registration_lock);
3982 list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, 3977 list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
3983 pr_reg_list) { 3978 pr_reg_list) {
3984 /* 3979 /*
 3985	 * Check for overflow of 8-byte PRI READ_KEYS payload and 3980	 * Check for overflow of 8-byte PRI READ_KEYS payload and
@@ -3999,7 +3994,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3999 3994
4000 add_len += 8; 3995 add_len += 8;
4001 } 3996 }
4002 spin_unlock(&su_dev->t10_pr.registration_lock); 3997 spin_unlock(&dev->t10_pr.registration_lock);
4003 3998
4004 buf[4] = ((add_len >> 24) & 0xff); 3999 buf[4] = ((add_len >> 24) & 0xff);
4005 buf[5] = ((add_len >> 16) & 0xff); 4000 buf[5] = ((add_len >> 16) & 0xff);
@@ -4018,8 +4013,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
4018 */ 4013 */
4019static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) 4014static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
4020{ 4015{
4021 struct se_device *se_dev = cmd->se_dev; 4016 struct se_device *dev = cmd->se_dev;
4022 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
4023 struct t10_pr_registration *pr_reg; 4017 struct t10_pr_registration *pr_reg;
4024 unsigned char *buf; 4018 unsigned char *buf;
4025 u64 pr_res_key; 4019 u64 pr_res_key;
@@ -4033,13 +4027,13 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
4033 } 4027 }
4034 4028
4035 buf = transport_kmap_data_sg(cmd); 4029 buf = transport_kmap_data_sg(cmd);
4036 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4030 buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
4037 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 4031 buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
4038 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 4032 buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
4039 buf[3] = (su_dev->t10_pr.pr_generation & 0xff); 4033 buf[3] = (dev->t10_pr.pr_generation & 0xff);
4040 4034
4041 spin_lock(&se_dev->dev_reservation_lock); 4035 spin_lock(&dev->dev_reservation_lock);
4042 pr_reg = se_dev->dev_pr_res_holder; 4036 pr_reg = dev->dev_pr_res_holder;
4043 if (pr_reg) { 4037 if (pr_reg) {
4044 /* 4038 /*
4045 * Set the hardcoded Additional Length 4039 * Set the hardcoded Additional Length
@@ -4090,7 +4084,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
4090 } 4084 }
4091 4085
4092err: 4086err:
4093 spin_unlock(&se_dev->dev_reservation_lock); 4087 spin_unlock(&dev->dev_reservation_lock);
4094 transport_kunmap_data_sg(cmd); 4088 transport_kunmap_data_sg(cmd);
4095 4089
4096 return 0; 4090 return 0;
@@ -4104,7 +4098,7 @@ err:
4104static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) 4098static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
4105{ 4099{
4106 struct se_device *dev = cmd->se_dev; 4100 struct se_device *dev = cmd->se_dev;
4107 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 4101 struct t10_reservation *pr_tmpl = &dev->t10_pr;
4108 unsigned char *buf; 4102 unsigned char *buf;
4109 u16 add_len = 8; /* Hardcoded to 8. */ 4103 u16 add_len = 8; /* Hardcoded to 8. */
4110 4104
@@ -4159,12 +4153,11 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
4159 */ 4153 */
4160static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) 4154static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4161{ 4155{
4162 struct se_device *se_dev = cmd->se_dev; 4156 struct se_device *dev = cmd->se_dev;
4163 struct se_node_acl *se_nacl; 4157 struct se_node_acl *se_nacl;
4164 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
4165 struct se_portal_group *se_tpg; 4158 struct se_portal_group *se_tpg;
4166 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 4159 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
4167 struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; 4160 struct t10_reservation *pr_tmpl = &dev->t10_pr;
4168 unsigned char *buf; 4161 unsigned char *buf;
4169 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; 4162 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
4170 u32 off = 8; /* off into first Full Status descriptor */ 4163 u32 off = 8; /* off into first Full Status descriptor */
@@ -4179,10 +4172,10 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4179 4172
4180 buf = transport_kmap_data_sg(cmd); 4173 buf = transport_kmap_data_sg(cmd);
4181 4174
4182 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4175 buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
4183 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 4176 buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
4184 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 4177 buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
4185 buf[3] = (su_dev->t10_pr.pr_generation & 0xff); 4178 buf[3] = (dev->t10_pr.pr_generation & 0xff);
4186 4179
4187 spin_lock(&pr_tmpl->registration_lock); 4180 spin_lock(&pr_tmpl->registration_lock);
4188 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 4181 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -4316,7 +4309,7 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
4316 * initiator or service action and shall terminate with a RESERVATION 4309 * initiator or service action and shall terminate with a RESERVATION
4317 * CONFLICT status. 4310 * CONFLICT status.
4318 */ 4311 */
4319 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) { 4312 if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
4320 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 4313 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
4321 " SPC-2 reservation is held, returning" 4314 " SPC-2 reservation is held, returning"
4322 " RESERVATION_CONFLICT\n"); 4315 " RESERVATION_CONFLICT\n");
@@ -4363,30 +4356,25 @@ static int core_pt_seq_non_holder(
4363 return 0; 4356 return 0;
4364} 4357}
4365 4358
4366int core_setup_reservations(struct se_device *dev, int force_pt) 4359void core_setup_reservations(struct se_device *dev)
4367{ 4360{
4368 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 4361 struct t10_reservation *rest = &dev->t10_pr;
4369 struct t10_reservation *rest = &su_dev->t10_pr; 4362
4370 /* 4363 /*
4371 * If this device is from Target_Core_Mod/pSCSI, use the reservations 4364 * If this device is from Target_Core_Mod/pSCSI, use the reservations
4372 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can 4365 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
4373 * cause a problem because libata and some SATA RAID HBAs appear 4366 * cause a problem because libata and some SATA RAID HBAs appear
 4374	 * under Linux/SCSI, but emulate reservations themselves. 4367	 * under Linux/SCSI, but emulate reservations themselves.
4375 */ 4368 */
4376 if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && 4369 if ((dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) ||
4377 !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) { 4370 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV &&
4371 !dev->dev_attrib.emulate_reservations)) {
4378 rest->res_type = SPC_PASSTHROUGH; 4372 rest->res_type = SPC_PASSTHROUGH;
4379 rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; 4373 rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
4380 rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; 4374 rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
4381 pr_debug("%s: Using SPC_PASSTHROUGH, no reservation" 4375 pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
4382 " emulation\n", dev->transport->name); 4376 " emulation\n", dev->transport->name);
4383 return 0; 4377 } else if (dev->transport->get_device_rev(dev) >= SCSI_3) {
4384 }
4385 /*
4386 * If SPC-3 or above is reported by real or emulated struct se_device,
4387 * use emulated Persistent Reservations.
4388 */
4389 if (dev->transport->get_device_rev(dev) >= SCSI_3) {
4390 rest->res_type = SPC3_PERSISTENT_RESERVATIONS; 4378 rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
4391 rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; 4379 rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
4392 rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; 4380 rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
@@ -4400,6 +4388,4 @@ int core_setup_reservations(struct se_device *dev, int force_pt)
4400 pr_debug("%s: Using SPC2_RESERVATIONS emulation\n", 4388 pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
4401 dev->transport->name); 4389 dev->transport->name);
4402 } 4390 }
4403
4404 return 0;
4405} 4391}
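
The reworked core_setup_reservations() above collapses the old two-step flow (early return for passthrough, then a separate SPC-3 check) into a single if/else chain, replaces the force_pt parameter with a check of HBA_FLAGS_INTERNAL_USE, and drops the int return since no branch can fail. A minimal standalone sketch of the resulting selection, using simplified stand-in fields rather than the real struct se_device (assumption: only the three inputs visible in this hunk drive the choice):

	/* Sketch of the post-patch selection in core_setup_reservations();
	 * the fields below are simplified stand-ins for the kernel structures. */
	enum res_type {
		SPC_PASSTHROUGH,
		SPC2_RESERVATIONS,
		SPC3_PERSISTENT_RESERVATIONS,
	};

	struct fake_dev {
		int hba_internal_use;		/* HBA_FLAGS_INTERNAL_USE set? */
		int is_phba_pdev;		/* TRANSPORT_PLUGIN_PHBA_PDEV? */
		int emulate_reservations;	/* dev_attrib.emulate_reservations */
		int device_rev;			/* transport->get_device_rev(dev) */
	};

	static enum res_type pick_res_type(const struct fake_dev *dev)
	{
		/* Internal-use HBAs, and pSCSI without reservation emulation,
		 * pass reservations through to the underlying hardware. */
		if (dev->hba_internal_use ||
		    (dev->is_phba_pdev && !dev->emulate_reservations))
			return SPC_PASSTHROUGH;
		/* SCSI-3 capable devices get persistent reservations... */
		if (dev->device_rev >= 3)	/* SCSI_3 */
			return SPC3_PERSISTENT_RESERVATIONS;
		/* ...everything else falls back to SPC-2 emulation. */
		return SPC2_RESERVATIONS;
	}
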
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index af6c460d886d..78451437d2c2 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -63,6 +63,6 @@ extern unsigned char *core_scsi3_pr_dump_type(int);
63 63
64extern int target_scsi3_emulate_pr_in(struct se_cmd *); 64extern int target_scsi3_emulate_pr_in(struct se_cmd *);
65extern int target_scsi3_emulate_pr_out(struct se_cmd *); 65extern int target_scsi3_emulate_pr_out(struct se_cmd *);
66extern int core_setup_reservations(struct se_device *, int); 66extern void core_setup_reservations(struct se_device *);
67 67
68#endif /* TARGET_CORE_PR_H */ 68#endif /* TARGET_CORE_PR_H */
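
Since core_setup_reservations() now returns void, callers lose their error path along with the force_pt argument. A hypothetical call site (the real one sits in core code outside this diff) shrinks to:

	/* Hypothetical caller; shows only the new void contract. */
	static int example_configure(struct se_device *dev)
	{
		core_setup_reservations(dev);	/* cannot fail anymore */
		return 0;
	}
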
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 617c086a8a02..b2965084eb4f 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -53,6 +53,11 @@
53 53
54#define ISPRINT(a) ((a >= ' ') && (a <= '~')) 54#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
55 55
56static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
57{
58 return container_of(dev, struct pscsi_dev_virt, dev);
59}
60
56static struct se_subsystem_api pscsi_template; 61static struct se_subsystem_api pscsi_template;
57 62
58static int pscsi_execute_cmd(struct se_cmd *cmd); 63static int pscsi_execute_cmd(struct se_cmd *cmd);
@@ -219,7 +224,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
219 224
220 snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); 225 snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
221 226
222 wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL; 227 wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
223 228
224 kfree(buf); 229 kfree(buf);
225 return 0; 230 return 0;
@@ -299,23 +304,13 @@ out:
299 kfree(buf); 304 kfree(buf);
300} 305}
301 306
302/* pscsi_add_device_to_list(): 307static int pscsi_add_device_to_list(struct se_device *dev,
303 * 308 struct scsi_device *sd)
304 *
305 */
306static struct se_device *pscsi_add_device_to_list(
307 struct se_hba *hba,
308 struct se_subsystem_dev *se_dev,
309 struct pscsi_dev_virt *pdv,
310 struct scsi_device *sd,
311 int dev_flags)
312{ 309{
313 struct se_device *dev; 310 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
314 struct se_dev_limits dev_limits; 311 struct request_queue *q = sd->request_queue;
315 struct request_queue *q;
316 struct queue_limits *limits;
317 312
318 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 313 pdv->pdv_sd = sd;
319 314
320 if (!sd->queue_depth) { 315 if (!sd->queue_depth) {
321 sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; 316 sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
@@ -324,54 +319,27 @@ static struct se_device *pscsi_add_device_to_list(
324 " queue_depth to %d\n", sd->channel, sd->id, 319 " queue_depth to %d\n", sd->channel, sd->id,
325 sd->lun, sd->queue_depth); 320 sd->lun, sd->queue_depth);
326 } 321 }
327 /* 322
328 * Setup the local scope queue_limits from struct request_queue->limits 323 dev->dev_attrib.hw_block_size = sd->sector_size;
329 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 324 dev->dev_attrib.hw_max_sectors =
330 */ 325 min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
331 q = sd->request_queue; 326 dev->dev_attrib.hw_queue_depth = sd->queue_depth;
332 limits = &dev_limits.limits; 327
333 limits->logical_block_size = sd->sector_size;
334 limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
335 limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
336 dev_limits.hw_queue_depth = sd->queue_depth;
337 dev_limits.queue_depth = sd->queue_depth;
338 /* 328 /*
339 * Setup our standard INQUIRY info into se_dev->t10_wwn 329 * Setup our standard INQUIRY info into se_dev->t10_wwn
340 */ 330 */
341 pscsi_set_inquiry_info(sd, &se_dev->t10_wwn); 331 pscsi_set_inquiry_info(sd, &dev->t10_wwn);
342
343 /*
 344	 * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
345 * which has already been referenced with Linux SCSI code with
346 * scsi_device_get() in this file's pscsi_create_virtdevice().
347 *
348 * The passthrough operations called by the transport_add_device_*
 349	 * function below will require this pointer to be set for passthrough
350 * ops.
351 *
352 * For the shutdown case in pscsi_free_device(), this struct
353 * scsi_device reference is released with Linux SCSI code
354 * scsi_device_put() and the pdv->pdv_sd cleared.
355 */
356 pdv->pdv_sd = sd;
357 dev = transport_add_device_to_core_hba(hba, &pscsi_template,
358 se_dev, dev_flags, pdv,
359 &dev_limits, NULL, NULL);
360 if (!dev) {
361 pdv->pdv_sd = NULL;
362 return NULL;
363 }
364 332
365 /* 333 /*
366 * Locate VPD WWN Information used for various purposes within 334 * Locate VPD WWN Information used for various purposes within
367 * the Storage Engine. 335 * the Storage Engine.
368 */ 336 */
369 if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) { 337 if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
370 /* 338 /*
371 * If VPD Unit Serial returned GOOD status, try 339 * If VPD Unit Serial returned GOOD status, try
372 * VPD Device Identification page (0x83). 340 * VPD Device Identification page (0x83).
373 */ 341 */
374 pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn); 342 pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
375 } 343 }
376 344
377 /* 345 /*
@@ -379,10 +347,11 @@ static struct se_device *pscsi_add_device_to_list(
379 */ 347 */
380 if (sd->type == TYPE_TAPE) 348 if (sd->type == TYPE_TAPE)
381 pscsi_tape_read_blocksize(dev, sd); 349 pscsi_tape_read_blocksize(dev, sd);
382 return dev; 350 return 0;
383} 351}
384 352
385static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) 353static struct se_device *pscsi_alloc_device(struct se_hba *hba,
354 const char *name)
386{ 355{
387 struct pscsi_dev_virt *pdv; 356 struct pscsi_dev_virt *pdv;
388 357
@@ -391,139 +360,125 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
391 pr_err("Unable to allocate memory for struct pscsi_dev_virt\n"); 360 pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
392 return NULL; 361 return NULL;
393 } 362 }
394 pdv->pdv_se_hba = hba;
395 363
396 pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name); 364 pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
397 return pdv; 365 return &pdv->dev;
398} 366}
399 367
400/* 368/*
 401	 * Called with struct Scsi_Host->host_lock held. 369	 * Called with struct Scsi_Host->host_lock held.
402 */ 370 */
403static struct se_device *pscsi_create_type_disk( 371static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
404 struct scsi_device *sd,
405 struct pscsi_dev_virt *pdv,
406 struct se_subsystem_dev *se_dev,
407 struct se_hba *hba)
408 __releases(sh->host_lock) 372 __releases(sh->host_lock)
409{ 373{
410 struct se_device *dev; 374 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
411 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; 375 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
412 struct Scsi_Host *sh = sd->host; 376 struct Scsi_Host *sh = sd->host;
413 struct block_device *bd; 377 struct block_device *bd;
414 u32 dev_flags = 0; 378 int ret;
415 379
416 if (scsi_device_get(sd)) { 380 if (scsi_device_get(sd)) {
417 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", 381 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
418 sh->host_no, sd->channel, sd->id, sd->lun); 382 sh->host_no, sd->channel, sd->id, sd->lun);
419 spin_unlock_irq(sh->host_lock); 383 spin_unlock_irq(sh->host_lock);
420 return NULL; 384 return -EIO;
421 } 385 }
422 spin_unlock_irq(sh->host_lock); 386 spin_unlock_irq(sh->host_lock);
423 /* 387 /*
424 * Claim exclusive struct block_device access to struct scsi_device 388 * Claim exclusive struct block_device access to struct scsi_device
425 * for TYPE_DISK using supplied udev_path 389 * for TYPE_DISK using supplied udev_path
426 */ 390 */
427 bd = blkdev_get_by_path(se_dev->se_dev_udev_path, 391 bd = blkdev_get_by_path(dev->udev_path,
428 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); 392 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
429 if (IS_ERR(bd)) { 393 if (IS_ERR(bd)) {
430 pr_err("pSCSI: blkdev_get_by_path() failed\n"); 394 pr_err("pSCSI: blkdev_get_by_path() failed\n");
431 scsi_device_put(sd); 395 scsi_device_put(sd);
432 return NULL; 396 return PTR_ERR(bd);
433 } 397 }
434 pdv->pdv_bd = bd; 398 pdv->pdv_bd = bd;
435 399
436 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 400 ret = pscsi_add_device_to_list(dev, sd);
437 if (!dev) { 401 if (ret) {
438 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 402 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
439 scsi_device_put(sd); 403 scsi_device_put(sd);
440 return NULL; 404 return ret;
441 } 405 }
406
442 pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", 407 pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
443 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); 408 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
444 409 return 0;
445 return dev;
446} 410}
447 411
448/* 412/*
 449	 * Called with struct Scsi_Host->host_lock held. 413	 * Called with struct Scsi_Host->host_lock held.
450 */ 414 */
451static struct se_device *pscsi_create_type_rom( 415static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
452 struct scsi_device *sd,
453 struct pscsi_dev_virt *pdv,
454 struct se_subsystem_dev *se_dev,
455 struct se_hba *hba)
456 __releases(sh->host_lock) 416 __releases(sh->host_lock)
457{ 417{
458 struct se_device *dev; 418 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
459 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
460 struct Scsi_Host *sh = sd->host; 419 struct Scsi_Host *sh = sd->host;
461 u32 dev_flags = 0; 420 int ret;
462 421
463 if (scsi_device_get(sd)) { 422 if (scsi_device_get(sd)) {
464 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", 423 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
465 sh->host_no, sd->channel, sd->id, sd->lun); 424 sh->host_no, sd->channel, sd->id, sd->lun);
466 spin_unlock_irq(sh->host_lock); 425 spin_unlock_irq(sh->host_lock);
467 return NULL; 426 return -EIO;
468 } 427 }
469 spin_unlock_irq(sh->host_lock); 428 spin_unlock_irq(sh->host_lock);
470 429
471 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 430 ret = pscsi_add_device_to_list(dev, sd);
472 if (!dev) { 431 if (ret) {
473 scsi_device_put(sd); 432 scsi_device_put(sd);
474 return NULL; 433 return ret;
475 } 434 }
476 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 435 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
477 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 436 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
478 sd->channel, sd->id, sd->lun); 437 sd->channel, sd->id, sd->lun);
479 438
480 return dev; 439 return 0;
481} 440}
482 441
483/* 442/*
 484	 *Called with struct Scsi_Host->host_lock held. 443	 * Called with struct Scsi_Host->host_lock held.
485 */ 444 */
486static struct se_device *pscsi_create_type_other( 445static int pscsi_create_type_other(struct se_device *dev,
487 struct scsi_device *sd, 446 struct scsi_device *sd)
488 struct pscsi_dev_virt *pdv,
489 struct se_subsystem_dev *se_dev,
490 struct se_hba *hba)
491 __releases(sh->host_lock) 447 __releases(sh->host_lock)
492{ 448{
493 struct se_device *dev; 449 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
494 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
495 struct Scsi_Host *sh = sd->host; 450 struct Scsi_Host *sh = sd->host;
496 u32 dev_flags = 0; 451 int ret;
497 452
498 spin_unlock_irq(sh->host_lock); 453 spin_unlock_irq(sh->host_lock);
499 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 454 ret = pscsi_add_device_to_list(dev, sd);
500 if (!dev) 455 if (ret)
501 return NULL; 456 return ret;
502 457
503 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 458 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
504 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 459 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
505 sd->channel, sd->id, sd->lun); 460 sd->channel, sd->id, sd->lun);
506 461 return 0;
507 return dev;
508} 462}
509 463
510static struct se_device *pscsi_create_virtdevice( 464int pscsi_configure_device(struct se_device *dev)
511 struct se_hba *hba,
512 struct se_subsystem_dev *se_dev,
513 void *p)
514{ 465{
515 struct pscsi_dev_virt *pdv = p; 466 struct se_hba *hba = dev->se_hba;
516 struct se_device *dev; 467 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
517 struct scsi_device *sd; 468 struct scsi_device *sd;
518 struct pscsi_hba_virt *phv = hba->hba_ptr; 469 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
519 struct Scsi_Host *sh = phv->phv_lld_host; 470 struct Scsi_Host *sh = phv->phv_lld_host;
520 int legacy_mode_enable = 0; 471 int legacy_mode_enable = 0;
472 int ret;
521 473
522 if (!pdv) { 474 if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
523 pr_err("Unable to locate struct pscsi_dev_virt" 475 !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
524 " parameter\n"); 476 !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
525 return ERR_PTR(-EINVAL); 477 pr_err("Missing scsi_channel_id=, scsi_target_id= and"
478 " scsi_lun_id= parameters\n");
479 return -EINVAL;
526 } 480 }
481
527 /* 482 /*
528 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the 483 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
529 * struct Scsi_Host we will need to bring the TCM/pSCSI object online 484 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
@@ -532,16 +487,16 @@ static struct se_device *pscsi_create_virtdevice(
532 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { 487 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
533 pr_err("pSCSI: Unable to locate struct" 488 pr_err("pSCSI: Unable to locate struct"
534 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); 489 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
535 return ERR_PTR(-ENODEV); 490 return -ENODEV;
536 } 491 }
537 /* 492 /*
538 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device 493 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
539 * reference, we enforce that udev_path has been set 494 * reference, we enforce that udev_path has been set
540 */ 495 */
541 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { 496 if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
542 pr_err("pSCSI: udev_path attribute has not" 497 pr_err("pSCSI: udev_path attribute has not"
543 " been set before ENABLE=1\n"); 498 " been set before ENABLE=1\n");
544 return ERR_PTR(-EINVAL); 499 return -EINVAL;
545 } 500 }
546 /* 501 /*
547 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID, 502 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
@@ -549,17 +504,14 @@ static struct se_device *pscsi_create_virtdevice(
549 * and enable for PHV_LLD_SCSI_HOST_NO mode. 504 * and enable for PHV_LLD_SCSI_HOST_NO mode.
550 */ 505 */
551 if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { 506 if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
552 spin_lock(&hba->device_lock); 507 if (hba->dev_count) {
553 if (!list_empty(&hba->hba_dev_list)) {
554 pr_err("pSCSI: Unable to set hba_mode" 508 pr_err("pSCSI: Unable to set hba_mode"
555 " with active devices\n"); 509 " with active devices\n");
556 spin_unlock(&hba->device_lock); 510 return -EEXIST;
557 return ERR_PTR(-EEXIST);
558 } 511 }
559 spin_unlock(&hba->device_lock);
560 512
561 if (pscsi_pmode_enable_hba(hba, 1) != 1) 513 if (pscsi_pmode_enable_hba(hba, 1) != 1)
562 return ERR_PTR(-ENODEV); 514 return -ENODEV;
563 515
564 legacy_mode_enable = 1; 516 legacy_mode_enable = 1;
565 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; 517 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
@@ -569,14 +521,14 @@ static struct se_device *pscsi_create_virtdevice(
569 if (IS_ERR(sh)) { 521 if (IS_ERR(sh)) {
570 pr_err("pSCSI: Unable to locate" 522 pr_err("pSCSI: Unable to locate"
571 " pdv_host_id: %d\n", pdv->pdv_host_id); 523 " pdv_host_id: %d\n", pdv->pdv_host_id);
572 return ERR_CAST(sh); 524 return PTR_ERR(sh);
573 } 525 }
574 } 526 }
575 } else { 527 } else {
576 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { 528 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
577 pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while" 529 pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
578 " struct Scsi_Host exists\n"); 530 " struct Scsi_Host exists\n");
579 return ERR_PTR(-EEXIST); 531 return -EEXIST;
580 } 532 }
581 } 533 }
582 534
@@ -593,17 +545,17 @@ static struct se_device *pscsi_create_virtdevice(
593 */ 545 */
594 switch (sd->type) { 546 switch (sd->type) {
595 case TYPE_DISK: 547 case TYPE_DISK:
596 dev = pscsi_create_type_disk(sd, pdv, se_dev, hba); 548 ret = pscsi_create_type_disk(dev, sd);
597 break; 549 break;
598 case TYPE_ROM: 550 case TYPE_ROM:
599 dev = pscsi_create_type_rom(sd, pdv, se_dev, hba); 551 ret = pscsi_create_type_rom(dev, sd);
600 break; 552 break;
601 default: 553 default:
602 dev = pscsi_create_type_other(sd, pdv, se_dev, hba); 554 ret = pscsi_create_type_other(dev, sd);
603 break; 555 break;
604 } 556 }
605 557
606 if (!dev) { 558 if (ret) {
607 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) 559 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
608 scsi_host_put(sh); 560 scsi_host_put(sh);
609 else if (legacy_mode_enable) { 561 else if (legacy_mode_enable) {
@@ -611,9 +563,9 @@ static struct se_device *pscsi_create_virtdevice(
611 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 563 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
612 } 564 }
613 pdv->pdv_sd = NULL; 565 pdv->pdv_sd = NULL;
614 return ERR_PTR(-ENODEV); 566 return ret;
615 } 567 }
616 return dev; 568 return 0;
617 } 569 }
618 spin_unlock_irq(sh->host_lock); 570 spin_unlock_irq(sh->host_lock);
619 571
@@ -627,17 +579,13 @@ static struct se_device *pscsi_create_virtdevice(
627 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 579 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
628 } 580 }
629 581
630 return ERR_PTR(-ENODEV); 582 return -ENODEV;
631} 583}
632 584
633/* pscsi_free_device(): (Part of se_subsystem_api_t template) 585static void pscsi_free_device(struct se_device *dev)
634 *
635 *
636 */
637static void pscsi_free_device(void *p)
638{ 586{
639 struct pscsi_dev_virt *pdv = p; 587 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
640 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; 588 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
641 struct scsi_device *sd = pdv->pdv_sd; 589 struct scsi_device *sd = pdv->pdv_sd;
642 590
643 if (sd) { 591 if (sd) {
@@ -670,7 +618,7 @@ static void pscsi_free_device(void *p)
670static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, 618static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
671 unsigned char *sense_buffer) 619 unsigned char *sense_buffer)
672{ 620{
673 struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; 621 struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
674 struct scsi_device *sd = pdv->pdv_sd; 622 struct scsi_device *sd = pdv->pdv_sd;
675 int result; 623 int result;
676 struct pscsi_plugin_task *pt = cmd->priv; 624 struct pscsi_plugin_task *pt = cmd->priv;
@@ -770,13 +718,11 @@ static match_table_t tokens = {
770 {Opt_err, NULL} 718 {Opt_err, NULL}
771}; 719};
772 720
773static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, 721static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
774 struct se_subsystem_dev *se_dev, 722 const char *page, ssize_t count)
775 const char *page,
776 ssize_t count)
777{ 723{
778 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; 724 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
779 struct pscsi_hba_virt *phv = hba->hba_ptr; 725 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
780 char *orig, *ptr, *opts; 726 char *orig, *ptr, *opts;
781 substring_t args[MAX_OPT_ARGS]; 727 substring_t args[MAX_OPT_ARGS];
782 int ret = 0, arg, token; 728 int ret = 0, arg, token;
@@ -841,29 +787,10 @@ out:
841 return (!ret) ? count : ret; 787 return (!ret) ? count : ret;
842} 788}
843 789
844static ssize_t pscsi_check_configfs_dev_params( 790static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
845 struct se_hba *hba,
846 struct se_subsystem_dev *se_dev)
847{
848 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
849
850 if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
851 !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
852 !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
853 pr_err("Missing scsi_channel_id=, scsi_target_id= and"
854 " scsi_lun_id= parameters\n");
855 return -EINVAL;
856 }
857
858 return 0;
859}
860
861static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
862 struct se_subsystem_dev *se_dev,
863 char *b)
864{ 791{
865 struct pscsi_hba_virt *phv = hba->hba_ptr; 792 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
866 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; 793 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
867 struct scsi_device *sd = pdv->pdv_sd; 794 struct scsi_device *sd = pdv->pdv_sd;
868 unsigned char host_id[16]; 795 unsigned char host_id[16];
869 ssize_t bl; 796 ssize_t bl;
@@ -933,7 +860,7 @@ static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
933 u32 sgl_nents, enum dma_data_direction data_direction, 860 u32 sgl_nents, enum dma_data_direction data_direction,
934 struct bio **hbio) 861 struct bio **hbio)
935{ 862{
936 struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; 863 struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
937 struct bio *bio = NULL, *tbio = NULL; 864 struct bio *bio = NULL, *tbio = NULL;
938 struct page *page; 865 struct page *page;
939 struct scatterlist *sg; 866 struct scatterlist *sg;
@@ -1104,7 +1031,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
1104 struct scatterlist *sgl = cmd->t_data_sg; 1031 struct scatterlist *sgl = cmd->t_data_sg;
1105 u32 sgl_nents = cmd->t_data_nents; 1032 u32 sgl_nents = cmd->t_data_nents;
1106 enum dma_data_direction data_direction = cmd->data_direction; 1033 enum dma_data_direction data_direction = cmd->data_direction;
1107 struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; 1034 struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
1108 struct pscsi_plugin_task *pt; 1035 struct pscsi_plugin_task *pt;
1109 struct request *req; 1036 struct request *req;
1110 struct bio *hbio; 1037 struct bio *hbio;
@@ -1191,7 +1118,7 @@ fail:
1191 */ 1118 */
1192static u32 pscsi_get_device_rev(struct se_device *dev) 1119static u32 pscsi_get_device_rev(struct se_device *dev)
1193{ 1120{
1194 struct pscsi_dev_virt *pdv = dev->dev_ptr; 1121 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
1195 struct scsi_device *sd = pdv->pdv_sd; 1122 struct scsi_device *sd = pdv->pdv_sd;
1196 1123
1197 return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1; 1124 return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
@@ -1203,7 +1130,7 @@ static u32 pscsi_get_device_rev(struct se_device *dev)
1203 */ 1130 */
1204static u32 pscsi_get_device_type(struct se_device *dev) 1131static u32 pscsi_get_device_type(struct se_device *dev)
1205{ 1132{
1206 struct pscsi_dev_virt *pdv = dev->dev_ptr; 1133 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
1207 struct scsi_device *sd = pdv->pdv_sd; 1134 struct scsi_device *sd = pdv->pdv_sd;
1208 1135
1209 return sd->type; 1136 return sd->type;
@@ -1211,7 +1138,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
1211 1138
1212static sector_t pscsi_get_blocks(struct se_device *dev) 1139static sector_t pscsi_get_blocks(struct se_device *dev)
1213{ 1140{
1214 struct pscsi_dev_virt *pdv = dev->dev_ptr; 1141 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
1215 1142
1216 if (pdv->pdv_bd && pdv->pdv_bd->bd_part) 1143 if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
1217 return pdv->pdv_bd->bd_part->nr_sects; 1144 return pdv->pdv_bd->bd_part->nr_sects;
@@ -1259,12 +1186,11 @@ static struct se_subsystem_api pscsi_template = {
1259 .attach_hba = pscsi_attach_hba, 1186 .attach_hba = pscsi_attach_hba,
1260 .detach_hba = pscsi_detach_hba, 1187 .detach_hba = pscsi_detach_hba,
1261 .pmode_enable_hba = pscsi_pmode_enable_hba, 1188 .pmode_enable_hba = pscsi_pmode_enable_hba,
1262 .allocate_virtdevice = pscsi_allocate_virtdevice, 1189 .alloc_device = pscsi_alloc_device,
1263 .create_virtdevice = pscsi_create_virtdevice, 1190 .configure_device = pscsi_configure_device,
1264 .free_device = pscsi_free_device, 1191 .free_device = pscsi_free_device,
1265 .transport_complete = pscsi_transport_complete, 1192 .transport_complete = pscsi_transport_complete,
1266 .parse_cdb = pscsi_parse_cdb, 1193 .parse_cdb = pscsi_parse_cdb,
1267 .check_configfs_dev_params = pscsi_check_configfs_dev_params,
1268 .set_configfs_dev_params = pscsi_set_configfs_dev_params, 1194 .set_configfs_dev_params = pscsi_set_configfs_dev_params,
1269 .show_configfs_dev_params = pscsi_show_configfs_dev_params, 1195 .show_configfs_dev_params = pscsi_show_configfs_dev_params,
1270 .get_device_rev = pscsi_get_device_rev, 1196 .get_device_rev = pscsi_get_device_rev,
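
The PSCSI_DEV() helper introduced at the top of this file is the idiom that replaces every dev->dev_ptr and se_dev->se_dev_su_ptr lookup in the hunks above: the backend embeds struct se_device inside its private structure, returns the embedded member from its alloc_device hook, and recovers the container with container_of() everywhere else. A self-contained userspace sketch of the same scheme, with illustrative names rather than the kernel's:

	#include <stddef.h>
	#include <stdlib.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct core_dev { int id; };	/* stands in for struct se_device */

	struct backend_dev {		/* stands in for pscsi_dev_virt */
		struct core_dev dev;	/* embedded, not pointed to */
		int private_state;
	};

	static inline struct backend_dev *BACKEND_DEV(struct core_dev *dev)
	{
		return container_of(dev, struct backend_dev, dev);
	}

	/* alloc_device-style hook: allocate the container, return the core part. */
	static struct core_dev *backend_alloc(void)
	{
		struct backend_dev *b = calloc(1, sizeof(*b));

		return b ? &b->dev : NULL;
	}

	/* free_device-style hook: recover the container and free it whole. */
	static void backend_free(struct core_dev *dev)
	{
		free(BACKEND_DEV(dev));
	}
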
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index bc1e5e11eca0..1bd757dff8ee 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -37,6 +37,7 @@ struct pscsi_plugin_task {
37#define PDF_HAS_VIRT_HOST_ID 0x20 37#define PDF_HAS_VIRT_HOST_ID 0x20
38 38
39struct pscsi_dev_virt { 39struct pscsi_dev_virt {
40 struct se_device dev;
40 int pdv_flags; 41 int pdv_flags;
41 int pdv_host_id; 42 int pdv_host_id;
42 int pdv_channel_id; 43 int pdv_channel_id;
@@ -44,7 +45,6 @@ struct pscsi_dev_virt {
44 int pdv_lun_id; 45 int pdv_lun_id;
45 struct block_device *pdv_bd; 46 struct block_device *pdv_bd;
46 struct scsi_device *pdv_sd; 47 struct scsi_device *pdv_sd;
47 struct se_hba *pdv_se_hba;
48} ____cacheline_aligned; 48} ____cacheline_aligned;
49 49
50typedef enum phv_modes { 50typedef enum phv_modes {
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index d00bbe33ff8b..d083f39c4b67 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -41,7 +41,10 @@
41 41
42#include "target_core_rd.h" 42#include "target_core_rd.h"
43 43
44static struct se_subsystem_api rd_mcp_template; 44static inline struct rd_dev *RD_DEV(struct se_device *dev)
45{
46 return container_of(dev, struct rd_dev, dev);
47}
45 48
46/* rd_attach_hba(): (Part of se_subsystem_api_t template) 49/* rd_attach_hba(): (Part of se_subsystem_api_t template)
47 * 50 *
@@ -196,7 +199,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
196 return 0; 199 return 0;
197} 200}
198 201
199static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name) 202static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
200{ 203{
201 struct rd_dev *rd_dev; 204 struct rd_dev *rd_dev;
202 struct rd_host *rd_host = hba->hba_ptr; 205 struct rd_host *rd_host = hba->hba_ptr;
@@ -209,39 +212,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
209 212
210 rd_dev->rd_host = rd_host; 213 rd_dev->rd_host = rd_host;
211 214
212 return rd_dev; 215 return &rd_dev->dev;
213} 216}
214 217
215static struct se_device *rd_create_virtdevice(struct se_hba *hba, 218static int rd_configure_device(struct se_device *dev)
216 struct se_subsystem_dev *se_dev, void *p)
217{ 219{
218 struct se_device *dev; 220 struct rd_dev *rd_dev = RD_DEV(dev);
219 struct se_dev_limits dev_limits; 221 struct rd_host *rd_host = dev->se_hba->hba_ptr;
220 struct rd_dev *rd_dev = p; 222 int ret;
221 struct rd_host *rd_host = hba->hba_ptr;
222 int dev_flags = 0, ret;
223 char prod[16], rev[4];
224 223
225 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 224 if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
225 pr_debug("Missing rd_pages= parameter\n");
226 return -EINVAL;
227 }
226 228
227 ret = rd_build_device_space(rd_dev); 229 ret = rd_build_device_space(rd_dev);
228 if (ret < 0) 230 if (ret < 0)
229 goto fail; 231 goto fail;
230 232
231 snprintf(prod, 16, "RAMDISK-MCP"); 233 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
232 snprintf(rev, 4, "%s", RD_MCP_VERSION); 234 dev->dev_attrib.hw_max_sectors = UINT_MAX;
233 235 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
234 dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
235 dev_limits.limits.max_hw_sectors = UINT_MAX;
236 dev_limits.limits.max_sectors = UINT_MAX;
237 dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
238 dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
239
240 dev = transport_add_device_to_core_hba(hba,
241 &rd_mcp_template, se_dev, dev_flags, rd_dev,
242 &dev_limits, prod, rev);
243 if (!dev)
244 goto fail;
245 236
246 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 237 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
247 238
@@ -251,16 +242,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba,
251 rd_dev->sg_table_count, 242 rd_dev->sg_table_count,
252 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); 243 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
253 244
254 return dev; 245 return 0;
255 246
256fail: 247fail:
257 rd_release_device_space(rd_dev); 248 rd_release_device_space(rd_dev);
258 return ERR_PTR(ret); 249 return ret;
259} 250}
260 251
261static void rd_free_device(void *p) 252static void rd_free_device(struct se_device *dev)
262{ 253{
263 struct rd_dev *rd_dev = p; 254 struct rd_dev *rd_dev = RD_DEV(dev);
264 255
265 rd_release_device_space(rd_dev); 256 rd_release_device_space(rd_dev);
266 kfree(rd_dev); 257 kfree(rd_dev);
@@ -290,7 +281,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
290 u32 sgl_nents = cmd->t_data_nents; 281 u32 sgl_nents = cmd->t_data_nents;
291 enum dma_data_direction data_direction = cmd->data_direction; 282 enum dma_data_direction data_direction = cmd->data_direction;
292 struct se_device *se_dev = cmd->se_dev; 283 struct se_device *se_dev = cmd->se_dev;
293 struct rd_dev *dev = se_dev->dev_ptr; 284 struct rd_dev *dev = RD_DEV(se_dev);
294 struct rd_dev_sg_table *table; 285 struct rd_dev_sg_table *table;
295 struct scatterlist *rd_sg; 286 struct scatterlist *rd_sg;
296 struct sg_mapping_iter m; 287 struct sg_mapping_iter m;
@@ -300,7 +291,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
300 u32 src_len; 291 u32 src_len;
301 u64 tmp; 292 u64 tmp;
302 293
303 tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size; 294 tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
304 rd_offset = do_div(tmp, PAGE_SIZE); 295 rd_offset = do_div(tmp, PAGE_SIZE);
305 rd_page = tmp; 296 rd_page = tmp;
306 rd_size = cmd->data_length; 297 rd_size = cmd->data_length;
@@ -378,13 +369,10 @@ static match_table_t tokens = {
378 {Opt_err, NULL} 369 {Opt_err, NULL}
379}; 370};
380 371
381static ssize_t rd_set_configfs_dev_params( 372static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
382 struct se_hba *hba, 373 const char *page, ssize_t count)
383 struct se_subsystem_dev *se_dev,
384 const char *page,
385 ssize_t count)
386{ 374{
387 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 375 struct rd_dev *rd_dev = RD_DEV(dev);
388 char *orig, *ptr, *opts; 376 char *orig, *ptr, *opts;
389 substring_t args[MAX_OPT_ARGS]; 377 substring_t args[MAX_OPT_ARGS];
390 int ret = 0, arg, token; 378 int ret = 0, arg, token;
@@ -417,24 +405,10 @@ static ssize_t rd_set_configfs_dev_params(
417 return (!ret) ? count : ret; 405 return (!ret) ? count : ret;
418} 406}
419 407
420static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 408static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
421{ 409{
422 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 410 struct rd_dev *rd_dev = RD_DEV(dev);
423
424 if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
425 pr_debug("Missing rd_pages= parameter\n");
426 return -EINVAL;
427 }
428 411
429 return 0;
430}
431
432static ssize_t rd_show_configfs_dev_params(
433 struct se_hba *hba,
434 struct se_subsystem_dev *se_dev,
435 char *b)
436{
437 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
438 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", 412 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
439 rd_dev->rd_dev_id); 413 rd_dev->rd_dev_id);
440 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" 414 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
@@ -455,9 +429,10 @@ static u32 rd_get_device_type(struct se_device *dev)
455 429
456static sector_t rd_get_blocks(struct se_device *dev) 430static sector_t rd_get_blocks(struct se_device *dev)
457{ 431{
458 struct rd_dev *rd_dev = dev->dev_ptr; 432 struct rd_dev *rd_dev = RD_DEV(dev);
433
459 unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / 434 unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
460 dev->se_sub_dev->se_dev_attrib.block_size) - 1; 435 dev->dev_attrib.block_size) - 1;
461 436
462 return blocks_long; 437 return blocks_long;
463} 438}
@@ -473,14 +448,15 @@ static int rd_parse_cdb(struct se_cmd *cmd)
473 448
474static struct se_subsystem_api rd_mcp_template = { 449static struct se_subsystem_api rd_mcp_template = {
475 .name = "rd_mcp", 450 .name = "rd_mcp",
451 .inquiry_prod = "RAMDISK-MCP",
452 .inquiry_rev = RD_MCP_VERSION,
476 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, 453 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
477 .attach_hba = rd_attach_hba, 454 .attach_hba = rd_attach_hba,
478 .detach_hba = rd_detach_hba, 455 .detach_hba = rd_detach_hba,
479 .allocate_virtdevice = rd_allocate_virtdevice, 456 .alloc_device = rd_alloc_device,
480 .create_virtdevice = rd_create_virtdevice, 457 .configure_device = rd_configure_device,
481 .free_device = rd_free_device, 458 .free_device = rd_free_device,
482 .parse_cdb = rd_parse_cdb, 459 .parse_cdb = rd_parse_cdb,
483 .check_configfs_dev_params = rd_check_configfs_dev_params,
484 .set_configfs_dev_params = rd_set_configfs_dev_params, 460 .set_configfs_dev_params = rd_set_configfs_dev_params,
485 .show_configfs_dev_params = rd_show_configfs_dev_params, 461 .show_configfs_dev_params = rd_show_configfs_dev_params,
486 .get_device_rev = rd_get_device_rev, 462 .get_device_rev = rd_get_device_rev,
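
rd_configure_device() above also shows the other half of the new contract: instead of packaging limits into a struct se_dev_limits for transport_add_device_to_core_hba(), the backend writes its hardware limits directly into dev->dev_attrib before returning. A sketch of that part of the hook, restricted to the three fields the rd and pscsi hunks actually assign (the constant values here are placeholders, since the diff does not show them):

	/* Sketch: the attribute-filling half of a configure_device hook.
	 * Field names come from the hunks; values are illustrative only. */
	static int example_configure_attribs(struct se_device *dev)
	{
		dev->dev_attrib.hw_block_size = 512;	/* placeholder for RD_BLOCKSIZE */
		dev->dev_attrib.hw_max_sectors = UINT_MAX;
		dev->dev_attrib.hw_queue_depth = 32;	/* placeholder for RD_MAX_DEVICE_QUEUE_DEPTH */
		return 0;
	}
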
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 21458125fe51..933b38b6e563 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -24,6 +24,7 @@ struct rd_dev_sg_table {
24#define RDF_HAS_PAGE_COUNT 0x01 24#define RDF_HAS_PAGE_COUNT 0x01
25 25
26struct rd_dev { 26struct rd_dev {
27 struct se_device dev;
27 u32 rd_flags; 28 u32 rd_flags;
28 /* Unique Ramdisk Device ID in Ramdisk HBA */ 29 /* Unique Ramdisk Device ID in Ramdisk HBA */
29 u32 rd_dev_id; 30 u32 rd_dev_id;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a6e27d967c7b..035193d04fa2 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -54,10 +54,10 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
 	buf[1] = (blocks >> 16) & 0xff;
 	buf[2] = (blocks >> 8) & 0xff;
 	buf[3] = blocks & 0xff;
-	buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
-	buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
-	buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
-	buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
+	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
+	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
+	buf[7] = dev->dev_attrib.block_size & 0xff;
 
 	rbuf = transport_kmap_data_sg(cmd);
 	if (rbuf) {
@@ -85,15 +85,15 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 	buf[5] = (blocks >> 16) & 0xff;
 	buf[6] = (blocks >> 8) & 0xff;
 	buf[7] = blocks & 0xff;
-	buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
-	buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
-	buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
-	buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
+	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
+	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
+	buf[11] = dev->dev_attrib.block_size & 0xff;
 	/*
 	 * Set Thin Provisioning Enable bit following sbc3r22 in section
 	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
 		buf[14] = 0x80;
 
 	rbuf = transport_kmap_data_sg(cmd);
@@ -143,7 +143,7 @@ static int sbc_emulate_noop(struct se_cmd *cmd)
 
 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
 {
-	return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
+	return cmd->se_dev->dev_attrib.block_size * sectors;
 }
 
 static int sbc_check_valid_sectors(struct se_cmd *cmd)
@@ -152,7 +152,7 @@ static int sbc_check_valid_sectors(struct se_cmd *cmd)
 	unsigned long long end_lba;
 	u32 sectors;
 
-	sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
+	sectors = cmd->data_length / dev->dev_attrib.block_size;
 	end_lba = dev->transport->get_blocks(dev) + 1;
 
 	if (cmd->t_task_lba + sectors > end_lba) {
@@ -315,7 +315,6 @@ out:
 
 int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
 {
-	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct se_device *dev = cmd->se_dev;
 	unsigned char *cdb = cmd->t_task_cdb;
 	unsigned int size;
@@ -562,18 +561,18 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
 		unsigned long long end_lba;
 
-		if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
+		if (sectors > dev->dev_attrib.fabric_max_sectors) {
 			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
 				" big sectors %u exceeds fabric_max_sectors:"
 				" %u\n", cdb[0], sectors,
-				su_dev->se_dev_attrib.fabric_max_sectors);
+				dev->dev_attrib.fabric_max_sectors);
 			goto out_invalid_cdb_field;
 		}
-		if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
+		if (sectors > dev->dev_attrib.hw_max_sectors) {
 			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
 				" big sectors %u exceeds backend hw_max_sectors:"
 				" %u\n", cdb[0], sectors,
-				su_dev->se_dev_attrib.hw_max_sectors);
+				dev->dev_attrib.hw_max_sectors);
 			goto out_invalid_cdb_field;
 		}
 
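
Every attribute lookup in this file loses one pointer hop: dev->se_sub_dev->se_dev_attrib.X becomes dev->dev_attrib.X. A hedged sketch of the resulting sector-limit gate, condensed from the sbc_parse_cdb() hunk above (the helper wrapper is an assumption for illustration):

	/* Sketch: sector-count validation against the embedded attributes,
	 * mirroring the fabric_max_sectors/hw_max_sectors checks above. */
	static bool sbc_sectors_ok(struct se_device *dev, u32 sectors)
	{
		if (sectors > dev->dev_attrib.fabric_max_sectors)
			return false;	/* exceeds fabric ceiling */
		if (sectors > dev->dev_attrib.hw_max_sectors)
			return false;	/* exceeds backend hardware ceiling */
		return true;
	}
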
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 6fd434d3d7e4..0af45ae32f8c 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -95,14 +95,14 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
 	/*
 	 * Enable SCCS and TPGS fields for Emulated ALUA
 	 */
-	if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
+	if (dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
 		spc_fill_alua_data(lun->lun_sep, buf);
 
 	buf[7] = 0x2; /* CmdQue=1 */
 
 	snprintf(&buf[8], 8, "LIO-ORG");
-	snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
-	snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
+	snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
+	snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
 	buf[4] = 31; /* Set additional length to 31 */
 
 	return 0;
@@ -114,15 +114,13 @@ static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 	struct se_device *dev = cmd->se_dev;
 	u16 len = 0;
 
-	if (dev->se_sub_dev->su_dev_flags &
-			SDF_EMULATED_VPD_UNIT_SERIAL) {
+	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
 		u32 unit_serial_len;
 
-		unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
+		unit_serial_len = strlen(dev->t10_wwn.unit_serial);
 		unit_serial_len++; /* For NULL Terminator */
 
-		len += sprintf(&buf[4], "%s",
-			dev->se_sub_dev->t10_wwn.unit_serial);
+		len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
 		len++; /* Extra Byte for NULL Terminator */
 		buf[3] = len;
 	}
@@ -132,7 +130,7 @@ static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
 		unsigned char *buf)
 {
-	unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+	unsigned char *p = &dev->t10_wwn.unit_serial[0];
 	int cnt;
 	bool next = true;
 
@@ -173,7 +171,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
+	unsigned char *prod = &dev->t10_wwn.model[0];
 	u32 prod_len;
 	u32 unit_serial_len, off = 0;
 	u16 len = 0, id_len;
@@ -188,7 +186,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
 	 * value in order to return the NAA id.
 	 */
-	if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
 		goto check_t10_vend_desc;
 
 	/* CODE SET == Binary */
@@ -236,14 +234,12 @@ check_t10_vend_desc:
 	prod_len += strlen(prod);
 	prod_len++; /* For : */
 
-	if (dev->se_sub_dev->su_dev_flags &
-			SDF_EMULATED_VPD_UNIT_SERIAL) {
-		unit_serial_len =
-			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
 		unit_serial_len++; /* For NULL Terminator */
 
 		id_len += sprintf(&buf[off+12], "%s:%s", prod,
-				&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+				&dev->t10_wwn.unit_serial[0]);
 	}
 	buf[off] = 0x2; /* ASCII */
 	buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -298,8 +294,7 @@ check_t10_vend_desc:
 	 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
 	 * section 7.5.1 Table 362
 	 */
-	if (dev->se_sub_dev->t10_alua.alua_type !=
-			SPC3_ALUA_EMULATED)
+	if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
 		goto check_scsi_name;
 
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
@@ -422,7 +417,7 @@ static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 	buf[5] = 0x07;
 
 	/* If WriteCache emulation is enabled, set V_SUP */
-	if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+	if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
 		buf[6] = 0x01;
 	return 0;
 }
@@ -439,7 +434,7 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	 * emulate_tpu=1 or emulate_tpws=1 we will be expect a
 	 * different page length for Thin Provisioning.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
 		have_tp = 1;
 
 	buf[0] = dev->transport->get_device_type(dev);
@@ -456,14 +451,14 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	/*
 	 * Set MAXIMUM TRANSFER LENGTH
 	 */
-	max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
-			  dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+	max_sectors = min(dev->dev_attrib.fabric_max_sectors,
+			  dev->dev_attrib.hw_max_sectors);
 	put_unaligned_be32(max_sectors, &buf[8]);
 
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
+	put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
 
 	/*
 	 * Exit now if we don't support TP.
@@ -474,25 +469,25 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	/*
 	 * Set MAXIMUM UNMAP LBA COUNT
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
+	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
 
 	/*
 	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
+	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
 			&buf[24]);
 
 	/*
 	 * Set OPTIMAL UNMAP GRANULARITY
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
+	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
 
 	/*
 	 * UNMAP GRANULARITY ALIGNMENT
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
+	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
 			&buf[32]);
-	if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
+	if (dev->dev_attrib.unmap_granularity_alignment != 0)
 		buf[32] |= 0x80; /* Set the UGAVALID bit */
 
 	return 0;
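
The Block Limits page simply serializes the embedded attributes big-endian at fixed byte offsets; put_unaligned_be32() is the existing kernel helper used throughout the hunk above. A hedged sketch consolidating the thin-provisioning fields into one helper (the consolidation itself is illustrative, not part of the patch):

	/* Sketch: thin-provisioning fields of the Block Limits VPD page.
	 * UGAVALID lives in the top bit of byte 32, above the 31-bit
	 * UNMAP GRANULARITY ALIGNMENT value. */
	static void spc_fill_tp_limits(struct se_device *dev, unsigned char *buf)
	{
		put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
		put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
				&buf[24]);
		put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
		put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
				&buf[32]);
		if (dev->dev_attrib.unmap_granularity_alignment != 0)
			buf[32] |= 0x80;	/* Set the UGAVALID bit */
	}
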
@@ -505,7 +500,7 @@ static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
 
 	buf[0] = dev->transport->get_device_type(dev);
 	buf[3] = 0x3c;
-	buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0;
+	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
 
 	return 0;
 }
@@ -546,7 +541,7 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
 	 * that the device server does not support the UNMAP command.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
+	if (dev->dev_attrib.emulate_tpu != 0)
 		buf[5] = 0x80;
 
 	/*
@@ -555,7 +550,7 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	 * A TPWS bit set to zero indicates that the device server does not
 	 * support the use of the WRITE SAME (16) command to unmap LBAs.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
+	if (dev->dev_attrib.emulate_tpws != 0)
 		buf[5] |= 0x40;
 
 	return 0;
@@ -586,8 +581,7 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 	 * Registered Extended LUN WWN has been set via ConfigFS
 	 * during device creation/restart.
 	 */
-	if (cmd->se_dev->se_sub_dev->su_dev_flags &
-			SDF_EMULATED_VPD_UNIT_SERIAL) {
+	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
 		buf[3] = ARRAY_SIZE(evpd_handlers);
 		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
 			buf[p + 4] = evpd_handlers[p].page;
@@ -690,7 +684,7 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
 	 * command sequence order shall be explicitly handled by the application client
 	 * through the selection of appropriate ommands and task attributes.
 	 */
-	p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
 	/*
 	 * From spc4r17, section 7.4.6 Control mode Page
 	 *
@@ -720,8 +714,8 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
 	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
 	 * to the number of commands completed with one of those status codes.
 	 */
-	p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
-	       (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+	       (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
 	/*
 	 * From spc4r17, section 7.4.6 Control mode Page
 	 *
@@ -734,7 +728,7 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
 	 * which the command was received shall be completed with TASK ABORTED
 	 * status (see SAM-4).
 	 */
-	p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
+	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
 	p[8] = 0xff;
 	p[9] = 0xff;
 	p[11] = 30;
@@ -746,7 +740,7 @@ static int spc_modesense_caching(struct se_device *dev, unsigned char *p)
 {
 	p[0] = 0x08;
 	p[1] = 0x12;
-	if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+	if (dev->dev_attrib.emulate_write_cache > 0)
 		p[2] = 0x04; /* Write Cache Enable */
 	p[12] = 0x20; /* Disabled Read Ahead */
 
@@ -826,8 +820,8 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
 		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
 			spc_modesense_write_protect(&buf[3], type);
 
-		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
-		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+		if ((dev->dev_attrib.emulate_write_cache > 0) &&
+		    (dev->dev_attrib.emulate_fua_write > 0))
 			spc_modesense_dpofua(&buf[3], type);
 	} else {
 		offset -= 1;
@@ -839,8 +833,8 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
 		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
 			spc_modesense_write_protect(&buf[2], type);
 
-		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
-		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+		if ((dev->dev_attrib.emulate_write_cache > 0) &&
+		    (dev->dev_attrib.emulate_fua_write > 0))
 			spc_modesense_dpofua(&buf[2], type);
 	}
 
@@ -923,7 +917,6 @@ static int spc_emulate_testunitready(struct se_cmd *cmd)
 int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	unsigned char *cdb = cmd->t_task_cdb;
 
 	switch (cdb[0]) {
@@ -946,12 +939,12 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		*size = (cdb[7] << 8) + cdb[8];
 		break;
 	case PERSISTENT_RESERVE_IN:
-		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+		if (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
 			cmd->execute_cmd = target_scsi3_emulate_pr_in;
 		*size = (cdb[7] << 8) + cdb[8];
 		break;
 	case PERSISTENT_RESERVE_OUT:
-		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+		if (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
 			cmd->execute_cmd = target_scsi3_emulate_pr_out;
 		*size = (cdb[7] << 8) + cdb[8];
 		break;
@@ -962,7 +955,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		else
 			*size = cmd->data_length;
 
-		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+		if (dev->t10_pr.res_type != SPC_PASSTHROUGH)
 			cmd->execute_cmd = target_scsi2_reservation_release;
 		break;
 	case RESERVE:
@@ -983,7 +976,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * is running in SPC_PASSTHROUGH, and wants reservations
 		 * emulation disabled.
 		 */
-		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+		if (dev->t10_pr.res_type != SPC_PASSTHROUGH)
 			cmd->execute_cmd = target_scsi2_reservation_reserve;
 		break;
 	case REQUEST_SENSE:
@@ -1040,7 +1033,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * Check for emulated MI_REPORT_TARGET_PGS
 		 */
		if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
-		    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+		    dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
 			cmd->execute_cmd =
 				target_emulate_report_target_port_groups;
 		}
@@ -1059,7 +1052,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * Check for emulated MO_SET_TARGET_PGS.
 		 */
 		if (cdb[1] == MO_SET_TARGET_PGS &&
-		    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+		    dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
 			cmd->execute_cmd =
 				target_emulate_set_target_port_groups;
 		}
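
spc_parse_cdb() now reads the reservation and ALUA state straight off se_device when wiring up emulation handlers. A condensed sketch of the dispatch shape, using only calls visible in the hunks above (the wrapper function is an assumption for illustration):

	/* Sketch: handler selection driven by the embedded t10_pr state,
	 * condensed from the PERSISTENT_RESERVE_{IN,OUT} cases above. */
	static void spc_pick_pr_handler(struct se_cmd *cmd, unsigned char *cdb)
	{
		struct se_device *dev = cmd->se_dev;

		switch (cdb[0]) {
		case PERSISTENT_RESERVE_IN:
			if (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
				cmd->execute_cmd = target_scsi3_emulate_pr_in;
			break;
		case PERSISTENT_RESERVE_OUT:
			if (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
				cmd->execute_cmd = target_scsi3_emulate_pr_out;
			break;
		}
	}
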
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index cb6b0036ae95..9bbb0170b726 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -80,13 +80,9 @@ static struct target_stat_scsi_dev_attribute \
 static ssize_t target_stat_scsi_dev_show_attr_inst(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_hba *hba = se_subdev->se_dev_hba;
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
+	struct se_hba *hba = dev->se_hba;
 
 	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
 }
@@ -95,12 +91,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(inst);
 static ssize_t target_stat_scsi_dev_show_attr_indx(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
 }
@@ -109,13 +101,6 @@ DEV_STAT_SCSI_DEV_ATTR_RO(indx);
 static ssize_t target_stat_scsi_dev_show_attr_role(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	return snprintf(page, PAGE_SIZE, "Target\n");
 }
 DEV_STAT_SCSI_DEV_ATTR_RO(role);
@@ -123,12 +108,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(role);
 static ssize_t target_stat_scsi_dev_show_attr_ports(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
 }
@@ -176,13 +157,9 @@ static struct target_stat_scsi_tgt_dev_attribute \
 static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_hba *hba = se_subdev->se_dev_hba;
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
+	struct se_hba *hba = dev->se_hba;
 
 	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
 }
@@ -191,12 +168,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
 }
@@ -205,13 +178,6 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
 }
 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
@@ -219,60 +185,27 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-	char status[16];
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
-	if (!dev)
-		return -ENODEV;
-
-	switch (dev->dev_status) {
-	case TRANSPORT_DEVICE_ACTIVATED:
-		strcpy(status, "activated");
-		break;
-	case TRANSPORT_DEVICE_DEACTIVATED:
-		strcpy(status, "deactivated");
-		break;
-	case TRANSPORT_DEVICE_SHUTDOWN:
-		strcpy(status, "shutdown");
-		break;
-	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-		strcpy(status, "offline");
-		break;
-	default:
-		sprintf(status, "unknown(%d)", dev->dev_status);
-		break;
-	}
-
-	return snprintf(page, PAGE_SIZE, "%s\n", status);
+	if (dev->export_count)
+		return snprintf(page, PAGE_SIZE, "activated");
+	else
+		return snprintf(page, PAGE_SIZE, "deactivated");
 }
 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
 
 static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 	int non_accessible_lus;
 
-	if (!dev)
-		return -ENODEV;
-
-	switch (dev->dev_status) {
-	case TRANSPORT_DEVICE_ACTIVATED:
+	if (dev->export_count)
 		non_accessible_lus = 0;
-		break;
-	case TRANSPORT_DEVICE_DEACTIVATED:
-	case TRANSPORT_DEVICE_SHUTDOWN:
-	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-	default:
+	else
 		non_accessible_lus = 1;
-		break;
-	}
 
 	return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
 }
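
Two things repeat through this file: the one-step container_of() recovery of se_device from the embedded se_dev_stat_grps, and export_count standing in for the removed dev_status state machine. Since the stat groups are now embedded in se_device itself, the dev pointer can no longer be NULL, so the -ENODEV guards disappear as well. A sketch of the common shape all converted show routines share:

	/* Sketch: the converted show-routine pattern; dev_stat_grps is now
	 * embedded in se_device, so dev can never be NULL here. */
	static ssize_t show_status(struct se_dev_stat_grps *sgrps, char *page)
	{
		struct se_device *dev =
			container_of(sgrps, struct se_device, dev_stat_grps);

		return snprintf(page, PAGE_SIZE, "%s\n",
				dev->export_count ? "activated" : "deactivated");
	}
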
@@ -281,12 +214,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
 }
@@ -335,13 +264,9 @@ static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
 static ssize_t target_stat_scsi_lu_show_attr_inst(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_hba *hba = se_subdev->se_dev_hba;
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
+	struct se_hba *hba = dev->se_hba;
 
 	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
 }
@@ -350,12 +275,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(inst);
 static ssize_t target_stat_scsi_lu_show_attr_dev(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
 }
@@ -364,13 +285,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev);
 static ssize_t target_stat_scsi_lu_show_attr_indx(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
 }
 DEV_STAT_SCSI_LU_ATTR_RO(indx);
@@ -378,12 +292,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(indx);
 static ssize_t target_stat_scsi_lu_show_attr_lun(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
 	/* FIXME: scsiLuDefaultLun */
 	return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
 }
@@ -392,35 +300,28 @@ DEV_STAT_SCSI_LU_ATTR_RO(lun);
 static ssize_t target_stat_scsi_lu_show_attr_lu_name(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
-	if (!dev)
-		return -ENODEV;
 	/* scsiLuWwnName */
 	return snprintf(page, PAGE_SIZE, "%s\n",
-			(strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
-			dev->se_sub_dev->t10_wwn.unit_serial : "None");
+			(strlen(dev->t10_wwn.unit_serial)) ?
+			dev->t10_wwn.unit_serial : "None");
 }
 DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
 
 static ssize_t target_stat_scsi_lu_show_attr_vend(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 	int i;
-	char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
-
-	if (!dev)
-		return -ENODEV;
+	char str[sizeof(dev->t10_wwn.vendor)+1];
 
 	/* scsiLuVendorId */
-	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
-		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
-			dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+		str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
+			dev->t10_wwn.vendor[i] : ' ';
 	str[i] = '\0';
 	return snprintf(page, PAGE_SIZE, "%s\n", str);
 }
@@ -429,19 +330,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(vend);
 static ssize_t target_stat_scsi_lu_show_attr_prod(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 	int i;
-	char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
-
-	if (!dev)
-		return -ENODEV;
+	char str[sizeof(dev->t10_wwn.model)+1];
 
 	/* scsiLuProductId */
-	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
-		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
-			dev->se_sub_dev->t10_wwn.model[i] : ' ';
+	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+		str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
+			dev->t10_wwn.model[i] : ' ';
 	str[i] = '\0';
 	return snprintf(page, PAGE_SIZE, "%s\n", str);
 }
@@ -450,19 +347,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(prod);
 static ssize_t target_stat_scsi_lu_show_attr_rev(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 	int i;
-	char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
-
-	if (!dev)
-		return -ENODEV;
+	char str[sizeof(dev->t10_wwn.revision)+1];
 
 	/* scsiLuRevisionId */
-	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
-		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
-			dev->se_sub_dev->t10_wwn.revision[i] : ' ';
+	for (i = 0; i < sizeof(dev->t10_wwn.revision); i++)
+		str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
+			dev->t10_wwn.revision[i] : ' ';
 	str[i] = '\0';
 	return snprintf(page, PAGE_SIZE, "%s\n", str);
 }
@@ -471,12 +364,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(rev);
 static ssize_t target_stat_scsi_lu_show_attr_dev_type(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	/* scsiLuPeripheralType */
 	return snprintf(page, PAGE_SIZE, "%u\n",
@@ -487,30 +376,18 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
 static ssize_t target_stat_scsi_lu_show_attr_status(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	/* scsiLuStatus */
 	return snprintf(page, PAGE_SIZE, "%s\n",
-		(dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
-		"available" : "notavailable");
+		(dev->export_count) ? "available" : "notavailable");
 }
 DEV_STAT_SCSI_LU_ATTR_RO(status);
 
 static ssize_t target_stat_scsi_lu_show_attr_state_bit(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	/* scsiLuState */
 	return snprintf(page, PAGE_SIZE, "exposed\n");
 }
@@ -519,12 +396,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
 static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	/* scsiLuNumCommands */
 	return snprintf(page, PAGE_SIZE, "%llu\n",
@@ -535,12 +408,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
 static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	/* scsiLuReadMegaBytes */
 	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
@@ -550,12 +419,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
 static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	/* scsiLuWrittenMegaBytes */
 	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
@@ -565,12 +430,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
 static ssize_t target_stat_scsi_lu_show_attr_resets(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	/* scsiLuInResets */
 	return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
@@ -580,13 +441,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(resets);
 static ssize_t target_stat_scsi_lu_show_attr_full_stat(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	/* FIXME: scsiLuOutTaskSetFullStatus */
 	return snprintf(page, PAGE_SIZE, "%u\n", 0);
 }
@@ -595,13 +449,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
 static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
-
 	/* FIXME: scsiLuHSInCommands */
 	return snprintf(page, PAGE_SIZE, "%u\n", 0);
 }
@@ -610,12 +457,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
 static ssize_t target_stat_scsi_lu_show_attr_creation_time(
 	struct se_dev_stat_grps *sgrps, char *page)
 {
-	struct se_subsystem_dev *se_subdev = container_of(sgrps,
-			struct se_subsystem_dev, dev_stat_grps);
-	struct se_device *dev = se_subdev->se_dev_ptr;
-
-	if (!dev)
-		return -ENODEV;
+	struct se_device *dev =
+		container_of(sgrps, struct se_device, dev_stat_grps);
 
 	/* scsiLuCreationTime */
 	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
@@ -662,20 +505,20 @@ static struct config_item_type target_stat_scsi_lu_cit = {
 * Called from target_core_configfs.c:target_core_make_subdev() to setup
 * the target statistics groups + configfs CITs located in target_core_stat.c
 */
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
+void target_stat_setup_dev_default_groups(struct se_device *dev)
 {
-	struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
+	struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group;
 
-	config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
+	config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
 			"scsi_dev", &target_stat_scsi_dev_cit);
-	config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
+	config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
 			"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
-	config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
+	config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
 			"scsi_lu", &target_stat_scsi_lu_cit);
 
-	dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
-	dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
-	dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
+	dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group;
+	dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group;
+	dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group;
 	dev_stat_grp->default_groups[3] = NULL;
 }
 
@@ -1161,7 +1004,7 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
 		return -ENODEV;
 	}
 	tpg = sep->sep_tpg;
-	wwn = &dev->se_sub_dev->t10_wwn;
+	wwn = &dev->t10_wwn;
 	/* scsiTransportDevName */
 	ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
 			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
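
The vend/prod/rev attributes above share one sanitize-and-NUL-terminate loop; a hedged sketch of that pattern factored into a helper (the helper is illustrative only, and uses isprint() from <linux/ctype.h> in place of the file-local ISPRINT macro). Note that the prod routine still bounds its loop by sizeof(t10_wwn.vendor) while copying the model string, a quirk carried over unchanged from the original code:

	/* Sketch: copy printable bytes, blank the rest, NUL-terminate.
	 * dst must have room for len + 1 bytes. */
	static void stat_sanitize(char *dst, const unsigned char *src, size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++)
			dst[i] = isprint(src[i]) ? src[i] : ' ';
		dst[i] = '\0';
	}
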
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index be75c4331a92..2aaceae964ee 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -371,7 +371,7 @@ int core_tmr_lun_reset(
 	 * which the command was received shall be completed with TASK ABORTED
 	 * status (see SAM-4).
 	 */
-	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+	tas = dev->dev_attrib.emulate_tas;
 	/*
 	 * Determine if this se_tmr is coming from a $FABRIC_MOD
 	 * or struct se_device passthrough..
@@ -399,10 +399,10 @@ int core_tmr_lun_reset(
 	 * LOGICAL UNIT RESET
 	 */
 	if (!preempt_and_abort_list &&
-	    (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+	    (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
 		spin_lock(&dev->dev_reservation_lock);
 		dev->dev_reserved_node_acl = NULL;
-		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+		dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
 		spin_unlock(&dev->dev_reservation_lock);
 		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
 	}
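
SPC-2 reservation state moves out of the general dev_flags word into a dedicated dev_reservation_flags, so LUN RESET drops a reservation roughly like this (a sketch, wrapped in a helper for illustration):

	/* Sketch: the SPC-2 release step of core_tmr_lun_reset(), using the
	 * renamed DRF_SPC2_RESERVATIONS flag from the hunk above. */
	static void tmr_release_spc2_reservation(struct se_device *dev)
	{
		spin_lock(&dev->dev_reservation_lock);
		if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
			dev->dev_reserved_node_acl = NULL;
			dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
			pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
		}
		spin_unlock(&dev->dev_reservation_lock);
	}
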
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9097155e9ebe..d6d24a47ed66 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -659,7 +659,7 @@ static void target_add_to_state_list(struct se_cmd *cmd)
 static void transport_write_pending_qf(struct se_cmd *cmd);
 static void transport_complete_qf(struct se_cmd *cmd);
 
-static void target_qf_do_work(struct work_struct *work)
+void target_qf_do_work(struct work_struct *work)
 {
 	struct se_device *dev = container_of(work, struct se_device,
 				qf_work_queue);
@@ -712,29 +712,15 @@ void transport_dump_dev_state(
 	int *bl)
 {
 	*bl += sprintf(b + *bl, "Status: ");
-	switch (dev->dev_status) {
-	case TRANSPORT_DEVICE_ACTIVATED:
+	if (dev->export_count)
 		*bl += sprintf(b + *bl, "ACTIVATED");
-		break;
-	case TRANSPORT_DEVICE_DEACTIVATED:
+	else
 		*bl += sprintf(b + *bl, "DEACTIVATED");
-		break;
-	case TRANSPORT_DEVICE_SHUTDOWN:
-		*bl += sprintf(b + *bl, "SHUTDOWN");
-		break;
-	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-		*bl += sprintf(b + *bl, "OFFLINE");
-		break;
-	default:
-		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
-		break;
-	}
 
 	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
 	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
-		dev->se_sub_dev->se_dev_attrib.block_size,
-		dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+		dev->dev_attrib.block_size,
+		dev->dev_attrib.hw_max_sectors);
 	*bl += sprintf(b + *bl, " ");
 }
 
@@ -991,185 +977,6 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
 }
 EXPORT_SYMBOL(transport_set_vpd_ident);
 
-static void core_setup_task_attr_emulation(struct se_device *dev)
-{
-	/*
-	 * If this device is from Target_Core_Mod/pSCSI, disable the
-	 * SAM Task Attribute emulation.
-	 *
-	 * This is currently not available in upsream Linux/SCSI Target
-	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
-	 */
-	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
-		return;
-	}
-
-	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
-	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
-		" device\n", dev->transport->name,
-		dev->transport->get_device_rev(dev));
-}
-
-static void scsi_dump_inquiry(struct se_device *dev)
-{
-	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
-	char buf[17];
-	int i, device_type;
-	/*
-	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
-	 */
-	for (i = 0; i < 8; i++)
-		if (wwn->vendor[i] >= 0x20)
-			buf[i] = wwn->vendor[i];
-		else
-			buf[i] = ' ';
-	buf[i] = '\0';
-	pr_debug(" Vendor: %s\n", buf);
-
-	for (i = 0; i < 16; i++)
-		if (wwn->model[i] >= 0x20)
-			buf[i] = wwn->model[i];
-		else
-			buf[i] = ' ';
-	buf[i] = '\0';
-	pr_debug(" Model: %s\n", buf);
-
-	for (i = 0; i < 4; i++)
-		if (wwn->revision[i] >= 0x20)
-			buf[i] = wwn->revision[i];
-		else
-			buf[i] = ' ';
-	buf[i] = '\0';
-	pr_debug(" Revision: %s\n", buf);
-
-	device_type = dev->transport->get_device_type(dev);
-	pr_debug(" Type: %s ", scsi_device_type(device_type));
-	pr_debug(" ANSI SCSI revision: %02x\n",
-		dev->transport->get_device_rev(dev));
-}
-
-struct se_device *transport_add_device_to_core_hba(
-	struct se_hba *hba,
-	struct se_subsystem_api *transport,
-	struct se_subsystem_dev *se_dev,
-	u32 device_flags,
-	void *transport_dev,
-	struct se_dev_limits *dev_limits,
-	const char *inquiry_prod,
-	const char *inquiry_rev)
-{
-	int force_pt;
-	struct se_device *dev;
-
-	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
-	if (!dev) {
-		pr_err("Unable to allocate memory for se_dev_t\n");
-		return NULL;
-	}
-
-	dev->dev_flags = device_flags;
-	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
-	dev->dev_ptr = transport_dev;
-	dev->se_hba = hba;
-	dev->se_sub_dev = se_dev;
-	dev->transport = transport;
-	INIT_LIST_HEAD(&dev->dev_list);
-	INIT_LIST_HEAD(&dev->dev_sep_list);
-	INIT_LIST_HEAD(&dev->dev_tmr_list);
-	INIT_LIST_HEAD(&dev->delayed_cmd_list);
-	INIT_LIST_HEAD(&dev->state_list);
-	INIT_LIST_HEAD(&dev->qf_cmd_list);
-	spin_lock_init(&dev->execute_task_lock);
-	spin_lock_init(&dev->delayed_cmd_lock);
-	spin_lock_init(&dev->dev_reservation_lock);
-	spin_lock_init(&dev->dev_status_lock);
-	spin_lock_init(&dev->se_port_lock);
-	spin_lock_init(&dev->se_tmr_lock);
-	spin_lock_init(&dev->qf_cmd_lock);
-	atomic_set(&dev->dev_ordered_id, 0);
-
-	se_dev_set_default_attribs(dev, dev_limits);
-
-	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
-	dev->creation_time = get_jiffies_64();
-	spin_lock_init(&dev->stats_lock);
-
-	spin_lock(&hba->device_lock);
-	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
-	hba->dev_count++;
-	spin_unlock(&hba->device_lock);
-	/*
-	 * Setup the SAM Task Attribute emulation for struct se_device
-	 */
-	core_setup_task_attr_emulation(dev);
-	/*
-	 * Force PR and ALUA passthrough emulation with internal object use.
-	 */
-	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
-	/*
-	 * Setup the Reservations infrastructure for struct se_device
-	 */
-	core_setup_reservations(dev, force_pt);
-	/*
-	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
-	 */
-	if (core_setup_alua(dev, force_pt) < 0)
-		goto err_dev_list;
-
-	/*
-	 * Startup the struct se_device processing thread
-	 */
-	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
-				      dev->transport->name);
-	if (!dev->tmr_wq) {
-		pr_err("Unable to create tmr workqueue for %s\n",
-			dev->transport->name);
-		goto err_dev_list;
-	}
-	/*
-	 * Setup work_queue for QUEUE_FULL
-	 */
-	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
-	/*
-	 * Preload the initial INQUIRY const values if we are doing
-	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
-	 * passthrough because this is being provided by the backend LLD.
-	 * This is required so that transport_get_inquiry() copies these
-	 * originals once back into DEV_T10_WWN(dev) for the virtual device
-	 * setup.
-	 */
-	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
-		if (!inquiry_prod || !inquiry_rev) {
-			pr_err("All non TCM/pSCSI plugins require"
-				" INQUIRY consts\n");
-			goto err_wq;
-		}
-
-		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
-		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
-		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
-	}
-	scsi_dump_inquiry(dev);
-
-	return dev;
-
-err_wq:
-	destroy_workqueue(dev->tmr_wq);
-err_dev_list:
-	spin_lock(&hba->device_lock);
-	list_del(&dev->dev_list);
-	hba->dev_count--;
-	spin_unlock(&hba->device_lock);
-
-	se_release_vpd_for_dev(dev);
-
-	kfree(dev);
-
-	return NULL;
-}
-EXPORT_SYMBOL(transport_add_device_to_core_hba);
-
 int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 {
 	struct se_device *dev = cmd->se_dev;
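
transport_add_device_to_core_hba() disappears because, per the commit message, the backend now allocates se_device itself, embedded in its own per-device structure. A hedged sketch of what a backend allocation hook looks like under that scheme (the function name and exact callback signature are assumptions; the corresponding hunks live in the backend files and target_core_device.c, outside this excerpt):

	/* Sketch: backend-side allocation with the embedded se_device;
	 * the core only ever sees a pointer to the embedded member. */
	static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
	{
		struct rd_dev *rd_dev;

		rd_dev = kzalloc(sizeof(*rd_dev), GFP_KERNEL);
		if (!rd_dev) {
			pr_err("Unable to allocate memory for struct rd_dev\n");
			return NULL;
		}

		return &rd_dev->dev;
	}

Note that target_qf_do_work() also loses its static qualifier above: the INIT_WORK() call that references it sat inside the deleted setup function, so with device setup moving out of this file the symbol evidently has to be visible externally.
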
@@ -1191,7 +998,7 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1191 * Reject READ_* or WRITE_* with overflow/underflow for 998 * Reject READ_* or WRITE_* with overflow/underflow for
1192 * type SCF_SCSI_DATA_CDB. 999 * type SCF_SCSI_DATA_CDB.
1193 */ 1000 */
1194 if (dev->se_sub_dev->se_dev_attrib.block_size != 512) { 1001 if (dev->dev_attrib.block_size != 512) {
1195 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1002 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
1196 " CDB on non 512-byte sector setup subsystem" 1003 " CDB on non 512-byte sector setup subsystem"
1197 " plugin: %s\n", dev->transport->name); 1004 " plugin: %s\n", dev->transport->name);
@@ -1293,7 +1100,7 @@ int target_setup_cmd_from_cdb(
1293 struct se_cmd *cmd, 1100 struct se_cmd *cmd,
1294 unsigned char *cdb) 1101 unsigned char *cdb)
1295{ 1102{
1296 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 1103 struct se_device *dev = cmd->se_dev;
1297 u32 pr_reg_type = 0; 1104 u32 pr_reg_type = 0;
1298 u8 alua_ascq = 0; 1105 u8 alua_ascq = 0;
1299 unsigned long flags; 1106 unsigned long flags;
@@ -1345,7 +1152,7 @@ int target_setup_cmd_from_cdb(
1345 return -EINVAL; 1152 return -EINVAL;
1346 } 1153 }
1347 1154
1348 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); 1155 ret = dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
1349 if (ret != 0) { 1156 if (ret != 0) {
1350 /* 1157 /*
1351 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; 1158 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
@@ -1371,8 +1178,8 @@ int target_setup_cmd_from_cdb(
1371 /* 1178 /*
1372 * Check status for SPC-3 Persistent Reservations 1179 * Check status for SPC-3 Persistent Reservations
1373 */ 1180 */
1374 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) { 1181 if (dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
1375 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( 1182 if (dev->t10_pr.pr_ops.t10_seq_non_holder(
1376 cmd, cdb, pr_reg_type) != 0) { 1183 cmd, cdb, pr_reg_type) != 0) {
1377 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1184 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1378 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; 1185 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
@@ -1387,7 +1194,7 @@ int target_setup_cmd_from_cdb(
1387 */ 1194 */
1388 } 1195 }
1389 1196
1390 ret = cmd->se_dev->transport->parse_cdb(cmd); 1197 ret = dev->transport->parse_cdb(cmd);
1391 if (ret < 0) 1198 if (ret < 0)
1392 return ret; 1199 return ret;
1393 1200
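
For reference, the three checks in target_setup_cmd_from_cdb() are all reached through per-device hooks: the ALUA state check and the SPC-3 persistent-reservation check now live directly on struct se_device (rather than the removed se_subsystem_dev), and parsing is delegated to the backend through dev->transport->parse_cdb(). A simplified sketch of that function-pointer dispatch, using stand-in types and trivially succeeding hooks:

#include <stdio.h>

struct dev {
	/* hypothetical simplifications of the hooks in the diff */
	int (*alua_state_check)(struct dev *);
	int (*reservation_check)(struct dev *);
	int (*parse_cdb)(struct dev *, const unsigned char *);
};

static int alua_ok(struct dev *d)	{ (void)d; return 0; }
static int pr_clear(struct dev *d)	{ (void)d; return 0; }
static int parse(struct dev *d, const unsigned char *cdb)
{
	(void)d;
	printf("parsing opcode 0x%02x\n", cdb[0]);
	return 0;
}

static int setup_cmd(struct dev *d, const unsigned char *cdb)
{
	if (d->alua_state_check(d))	/* e.g. LUN not accessible */
		return -1;
	if (d->reservation_check(d))	/* e.g. reservation conflict */
		return -1;
	return d->parse_cdb(d, cdb);	/* backend-specific CDB parsing */
}

int main(void)
{
	struct dev d = { alua_ok, pr_clear, parse };
	const unsigned char cdb[16] = { 0x28 };	/* READ_10 */

	return setup_cmd(&d, cdb);
}

The mechanical change in this hunk is just the indirection source: su_dev->t10_alua and su_dev->t10_pr become dev->t10_alua and dev->t10_pr.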
@@ -1759,7 +1566,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
1759 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1566 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1760 */ 1567 */
1761 if (cmd->se_sess && 1568 if (cmd->se_sess &&
1762 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) 1569 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
1763 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 1570 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1764 cmd->orig_fe_lun, 0x2C, 1571 cmd->orig_fe_lun, 0x2C,
1765 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1572 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
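
The rule in this hunk: after a reservation conflict, a unit attention is established for the involved initiator only when emulate_ua_intlck_ctrl is set to 2, using ASC 0x2C. A tiny illustrative conditional (the struct and the 0x09 ASCQ value are stand-ins for the kernel's UA record and the ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS macro):

#include <stdio.h>

struct ua_entry { unsigned char asc, ascq; };

static int queue_ua_on_conflict(int ua_intlck_ctrl, struct ua_entry *out)
{
	if (ua_intlck_ctrl != 2)
		return 0;	/* interlock not requested: queue nothing */
	out->asc  = 0x2c;	/* from the call above */
	out->ascq = 0x09;	/* previous reservation conflict status */
	return 1;
}

int main(void)
{
	struct ua_entry ua;

	if (queue_ua_on_conflict(2, &ua))
		printf("queued UA %02x/%02x\n", ua.asc, ua.ascq);
	return 0;
}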
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 6666a0c74f60..59c95ee14749 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -237,7 +237,7 @@ void core_scsi3_ua_for_check_condition(
237 * highest priority UNIT_ATTENTION and ASC/ASCQ without 237 * highest priority UNIT_ATTENTION and ASC/ASCQ without
238 * clearing it. 238 * clearing it.
239 */ 239 */
240 if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) { 240 if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
241 *asc = ua->ua_asc; 241 *asc = ua->ua_asc;
242 *ascq = ua->ua_ascq; 242 *ascq = ua->ua_ascq;
243 break; 243 break;
@@ -265,8 +265,8 @@ void core_scsi3_ua_for_check_condition(
265 " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" 265 " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
266 " reported ASC: 0x%02x, ASCQ: 0x%02x\n", 266 " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
267 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 267 nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
268 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : 268 (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
269 "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, 269 "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
270 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); 270 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
271 } 271 }
272 272
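
The ua.c change keeps the same report-versus-release logic, just with the attribute moved onto struct se_device: when emulate_ua_intlck_ctrl is non-zero the highest-priority pending unit attention is reported but left queued, and when it is zero the entry is reported once and removed. A self-contained userspace sketch of that behavior over a simplified singly linked UA list (the kernel uses its own list primitives and locking, so this is illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct ua_entry {
	unsigned char asc, ascq;
	struct ua_entry *next;
};

static int check_condition(struct ua_entry **head, int intlck_ctrl,
			   unsigned char *asc, unsigned char *ascq)
{
	struct ua_entry *ua = *head;	/* head == highest priority */

	if (!ua)
		return 0;
	*asc = ua->asc;
	*ascq = ua->ascq;
	if (!intlck_ctrl) {		/* "Releasing": consume the entry */
		*head = ua->next;
		free(ua);
	}				/* "Reporting": leave it queued */
	return 1;
}

int main(void)
{
	struct ua_entry *head = calloc(1, sizeof(*head));
	unsigned char asc, ascq;

	if (!head)
		return 1;
	head->asc = 0x29;		/* example: POWER ON or RESET */
	head->ascq = 0x00;

	while (check_condition(&head, 0, &asc, &ascq))
		printf("Releasing UA %02x/%02x\n", asc, ascq);
	return 0;
}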