author    Christoph Hellwig <hch@infradead.org>    2012-10-08 00:03:19 -0400
committer Nicholas Bellinger <nab@linux-iscsi.org>    2012-11-06 23:55:43 -0500
commit    0fd97ccf45be26fb01b3a412f1f6c6b5044b2f16 (patch)
tree      c642e3da11e534a311a1e998ef740a3d44b9187b /drivers/target/target_core_configfs.c
parent    3d70f8c617a436c7146ecb81df2265b4626dfe89 (diff)
target: kill struct se_subsystem_dev
Simplify the code a lot by killing the superfluous struct se_subsystem_dev. Instead, se_device is allocated early on by the backend driver, which allocates it as part of its own per-device structure, borrowing the scheme used, for example, for inode allocation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
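As context for the diff below, the embedded-allocation scheme the changelog refers to works like the kernel's inode allocation: the backend allocates its own per-device structure with a struct se_device embedded in it, hands the embedded se_device back to the core, and later recovers its wrapper with container_of(). The following sketch is illustrative only; struct example_dev, example_alloc_device() and example_free_device() are hypothetical names, while struct se_device, struct se_hba, target_alloc_device() and target_free_device() are the names that actually appear in this patch.

#include <linux/slab.h>
#include <target/target_core_base.h>

/* Hypothetical backend per-device structure embedding the generic one. */
struct example_dev {
	struct se_device dev;	/* generic part, embedded */
	void *backing_store;	/* backend-private state */
};

static struct se_device *example_alloc_device(struct se_hba *hba,
					      const char *name)
{
	struct example_dev *edev;

	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return NULL;
	/* Hand the embedded generic device back to the core. */
	return &edev->dev;
}

static void example_free_device(struct se_device *dev)
{
	/* Recover the containing backend structure and free it. */
	struct example_dev *edev = container_of(dev, struct example_dev, dev);

	kfree(edev);
}

With se_device allocated up front this way, the configfs code in this file no longer needs the se_dev_ptr indirection and the !dev / -ENODEV checks; it reaches the device directly, e.g. via container_of(to_config_group(item), struct se_device, dev_group) as seen in target_core_dev_release() below.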
Diffstat (limited to 'drivers/target/target_core_configfs.c')
-rw-r--r--  drivers/target/target_core_configfs.c | 528
1 file changed, 152 insertions, 376 deletions
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c123327499a3..7272016ed05f 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -565,21 +565,8 @@ static ssize_t target_core_dev_show_attr_##_name( \
565 struct se_dev_attrib *da, \ 565 struct se_dev_attrib *da, \
566 char *page) \ 566 char *page) \
567{ \ 567{ \
568 struct se_device *dev; \ 568 return snprintf(page, PAGE_SIZE, "%u\n", \
569 struct se_subsystem_dev *se_dev = da->da_sub_dev; \ 569 (u32)da->da_dev->dev_attrib._name); \
570 ssize_t rb; \
571 \
572 spin_lock(&se_dev->se_dev_lock); \
573 dev = se_dev->se_dev_ptr; \
574 if (!dev) { \
575 spin_unlock(&se_dev->se_dev_lock); \
576 return -ENODEV; \
577 } \
578 rb = snprintf(page, PAGE_SIZE, "%u\n", \
579 (u32)dev->se_sub_dev->se_dev_attrib._name); \
580 spin_unlock(&se_dev->se_dev_lock); \
581 \
582 return rb; \
583} 570}
584 571
585#define DEF_DEV_ATTRIB_STORE(_name) \ 572#define DEF_DEV_ATTRIB_STORE(_name) \
@@ -588,26 +575,16 @@ static ssize_t target_core_dev_store_attr_##_name( \
588 const char *page, \ 575 const char *page, \
589 size_t count) \ 576 size_t count) \
590{ \ 577{ \
591 struct se_device *dev; \
592 struct se_subsystem_dev *se_dev = da->da_sub_dev; \
593 unsigned long val; \ 578 unsigned long val; \
594 int ret; \ 579 int ret; \
595 \ 580 \
596 spin_lock(&se_dev->se_dev_lock); \
597 dev = se_dev->se_dev_ptr; \
598 if (!dev) { \
599 spin_unlock(&se_dev->se_dev_lock); \
600 return -ENODEV; \
601 } \
602 ret = strict_strtoul(page, 0, &val); \ 581 ret = strict_strtoul(page, 0, &val); \
603 if (ret < 0) { \ 582 if (ret < 0) { \
604 spin_unlock(&se_dev->se_dev_lock); \
605 pr_err("strict_strtoul() failed with" \ 583 pr_err("strict_strtoul() failed with" \
606 " ret: %d\n", ret); \ 584 " ret: %d\n", ret); \
607 return -EINVAL; \ 585 return -EINVAL; \
608 } \ 586 } \
609 ret = se_dev_set_##_name(dev, (u32)val); \ 587 ret = se_dev_set_##_name(da->da_dev, (u32)val); \
610 spin_unlock(&se_dev->se_dev_lock); \
611 \ 588 \
612 return (!ret) ? count : -EINVAL; \ 589 return (!ret) ? count : -EINVAL; \
613} 590}
@@ -764,13 +741,6 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
764 struct t10_wwn *t10_wwn, 741 struct t10_wwn *t10_wwn,
765 char *page) 742 char *page)
766{ 743{
767 struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
768 struct se_device *dev;
769
770 dev = se_dev->se_dev_ptr;
771 if (!dev)
772 return -ENODEV;
773
774 return sprintf(page, "T10 VPD Unit Serial Number: %s\n", 744 return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
775 &t10_wwn->unit_serial[0]); 745 &t10_wwn->unit_serial[0]);
776} 746}
@@ -780,8 +750,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
780 const char *page, 750 const char *page,
781 size_t count) 751 size_t count)
782{ 752{
783 struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev; 753 struct se_device *dev = t10_wwn->t10_dev;
784 struct se_device *dev;
785 unsigned char buf[INQUIRY_VPD_SERIAL_LEN]; 754 unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
786 755
787 /* 756 /*
@@ -794,7 +763,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
794 * it is doing 'the right thing' wrt a world wide unique 763 * it is doing 'the right thing' wrt a world wide unique
795 * VPD Unit Serial Number that OS dependent multipath can depend on. 764 * VPD Unit Serial Number that OS dependent multipath can depend on.
796 */ 765 */
797 if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { 766 if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
798 pr_err("Underlying SCSI device firmware provided VPD" 767 pr_err("Underlying SCSI device firmware provided VPD"
799 " Unit Serial, ignoring request\n"); 768 " Unit Serial, ignoring request\n");
800 return -EOPNOTSUPP; 769 return -EOPNOTSUPP;
@@ -811,15 +780,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
811 * (underneath the initiator side OS dependent multipath code) 780 * (underneath the initiator side OS dependent multipath code)
812 * could cause negative effects. 781 * could cause negative effects.
813 */ 782 */
814 dev = su_dev->se_dev_ptr; 783 if (dev->export_count) {
815 if (dev) { 784 pr_err("Unable to set VPD Unit Serial while"
816 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 785 " active %d $FABRIC_MOD exports exist\n",
817 pr_err("Unable to set VPD Unit Serial while" 786 dev->export_count);
818 " active %d $FABRIC_MOD exports exist\n", 787 return -EINVAL;
819 atomic_read(&dev->dev_export_obj.obj_access_count));
820 return -EINVAL;
821 }
822 } 788 }
789
823 /* 790 /*
824 * This currently assumes ASCII encoding for emulated VPD Unit Serial. 791 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
825 * 792 *
@@ -828,12 +795,12 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
828 */ 795 */
829 memset(buf, 0, INQUIRY_VPD_SERIAL_LEN); 796 memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
830 snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page); 797 snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
831 snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, 798 snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
832 "%s", strstrip(buf)); 799 "%s", strstrip(buf));
833 su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; 800 dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
834 801
835 pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" 802 pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
836 " %s\n", su_dev->t10_wwn.unit_serial); 803 " %s\n", dev->t10_wwn.unit_serial);
837 804
838 return count; 805 return count;
839} 806}
@@ -847,16 +814,10 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
847 struct t10_wwn *t10_wwn, 814 struct t10_wwn *t10_wwn,
848 char *page) 815 char *page)
849{ 816{
850 struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
851 struct se_device *dev;
852 struct t10_vpd *vpd; 817 struct t10_vpd *vpd;
853 unsigned char buf[VPD_TMP_BUF_SIZE]; 818 unsigned char buf[VPD_TMP_BUF_SIZE];
854 ssize_t len = 0; 819 ssize_t len = 0;
855 820
856 dev = se_dev->se_dev_ptr;
857 if (!dev)
858 return -ENODEV;
859
860 memset(buf, 0, VPD_TMP_BUF_SIZE); 821 memset(buf, 0, VPD_TMP_BUF_SIZE);
861 822
862 spin_lock(&t10_wwn->t10_vpd_lock); 823 spin_lock(&t10_wwn->t10_vpd_lock);
@@ -894,16 +855,10 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
894 struct t10_wwn *t10_wwn, \ 855 struct t10_wwn *t10_wwn, \
895 char *page) \ 856 char *page) \
896{ \ 857{ \
897 struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
898 struct se_device *dev; \
899 struct t10_vpd *vpd; \ 858 struct t10_vpd *vpd; \
900 unsigned char buf[VPD_TMP_BUF_SIZE]; \ 859 unsigned char buf[VPD_TMP_BUF_SIZE]; \
901 ssize_t len = 0; \ 860 ssize_t len = 0; \
902 \ 861 \
903 dev = se_dev->se_dev_ptr; \
904 if (!dev) \
905 return -ENODEV; \
906 \
907 spin_lock(&t10_wwn->t10_vpd_lock); \ 862 spin_lock(&t10_wwn->t10_vpd_lock); \
908 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \ 863 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
909 if (vpd->association != _assoc) \ 864 if (vpd->association != _assoc) \
@@ -1003,7 +958,7 @@ static struct config_item_type target_core_dev_wwn_cit = {
1003 958
1004/* Start functions for struct config_item_type target_core_dev_pr_cit */ 959/* Start functions for struct config_item_type target_core_dev_pr_cit */
1005 960
1006CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev); 961CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
1007#define SE_DEV_PR_ATTR(_name, _mode) \ 962#define SE_DEV_PR_ATTR(_name, _mode) \
1008static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ 963static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
1009 __CONFIGFS_EATTR(_name, _mode, \ 964 __CONFIGFS_EATTR(_name, _mode, \
@@ -1071,23 +1026,17 @@ static ssize_t target_core_dev_pr_show_spc2_res(
1071 return *len; 1026 return *len;
1072} 1027}
1073 1028
1074static ssize_t target_core_dev_pr_show_attr_res_holder( 1029static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
1075 struct se_subsystem_dev *su_dev, 1030 char *page)
1076 char *page)
1077{ 1031{
1078 ssize_t len = 0; 1032 ssize_t len = 0;
1079 1033
1080 if (!su_dev->se_dev_ptr) 1034 switch (dev->t10_pr.res_type) {
1081 return -ENODEV;
1082
1083 switch (su_dev->t10_pr.res_type) {
1084 case SPC3_PERSISTENT_RESERVATIONS: 1035 case SPC3_PERSISTENT_RESERVATIONS:
1085 target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, 1036 target_core_dev_pr_show_spc3_res(dev, page, &len);
1086 page, &len);
1087 break; 1037 break;
1088 case SPC2_RESERVATIONS: 1038 case SPC2_RESERVATIONS:
1089 target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr, 1039 target_core_dev_pr_show_spc2_res(dev, page, &len);
1090 page, &len);
1091 break; 1040 break;
1092 case SPC_PASSTHROUGH: 1041 case SPC_PASSTHROUGH:
1093 len += sprintf(page+len, "Passthrough\n"); 1042 len += sprintf(page+len, "Passthrough\n");
@@ -1102,22 +1051,13 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(
1102 1051
1103SE_DEV_PR_ATTR_RO(res_holder); 1052SE_DEV_PR_ATTR_RO(res_holder);
1104 1053
1105/*
1106 * res_pr_all_tgt_pts
1107 */
1108static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( 1054static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
1109 struct se_subsystem_dev *su_dev, 1055 struct se_device *dev, char *page)
1110 char *page)
1111{ 1056{
1112 struct se_device *dev;
1113 struct t10_pr_registration *pr_reg; 1057 struct t10_pr_registration *pr_reg;
1114 ssize_t len = 0; 1058 ssize_t len = 0;
1115 1059
1116 dev = su_dev->se_dev_ptr; 1060 if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1117 if (!dev)
1118 return -ENODEV;
1119
1120 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1121 return len; 1061 return len;
1122 1062
1123 spin_lock(&dev->dev_reservation_lock); 1063 spin_lock(&dev->dev_reservation_lock);
@@ -1144,20 +1084,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
1144 1084
1145SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts); 1085SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
1146 1086
1147/*
1148 * res_pr_generation
1149 */
1150static ssize_t target_core_dev_pr_show_attr_res_pr_generation( 1087static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
1151 struct se_subsystem_dev *su_dev, 1088 struct se_device *dev, char *page)
1152 char *page)
1153{ 1089{
1154 if (!su_dev->se_dev_ptr) 1090 if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1155 return -ENODEV;
1156
1157 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1158 return 0; 1091 return 0;
1159 1092
1160 return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation); 1093 return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
1161} 1094}
1162 1095
1163SE_DEV_PR_ATTR_RO(res_pr_generation); 1096SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1166,10 +1099,8 @@ SE_DEV_PR_ATTR_RO(res_pr_generation);
1166 * res_pr_holder_tg_port 1099 * res_pr_holder_tg_port
1167 */ 1100 */
1168static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( 1101static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1169 struct se_subsystem_dev *su_dev, 1102 struct se_device *dev, char *page)
1170 char *page)
1171{ 1103{
1172 struct se_device *dev;
1173 struct se_node_acl *se_nacl; 1104 struct se_node_acl *se_nacl;
1174 struct se_lun *lun; 1105 struct se_lun *lun;
1175 struct se_portal_group *se_tpg; 1106 struct se_portal_group *se_tpg;
@@ -1177,11 +1108,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1177 struct target_core_fabric_ops *tfo; 1108 struct target_core_fabric_ops *tfo;
1178 ssize_t len = 0; 1109 ssize_t len = 0;
1179 1110
1180 dev = su_dev->se_dev_ptr; 1111 if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1181 if (!dev)
1182 return -ENODEV;
1183
1184 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1185 return len; 1112 return len;
1186 1113
1187 spin_lock(&dev->dev_reservation_lock); 1114 spin_lock(&dev->dev_reservation_lock);
@@ -1211,12 +1138,8 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1211 1138
1212SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port); 1139SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
1213 1140
1214/*
1215 * res_pr_registered_i_pts
1216 */
1217static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( 1141static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1218 struct se_subsystem_dev *su_dev, 1142 struct se_device *dev, char *page)
1219 char *page)
1220{ 1143{
1221 struct target_core_fabric_ops *tfo; 1144 struct target_core_fabric_ops *tfo;
1222 struct t10_pr_registration *pr_reg; 1145 struct t10_pr_registration *pr_reg;
@@ -1225,16 +1148,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1225 ssize_t len = 0; 1148 ssize_t len = 0;
1226 int reg_count = 0, prf_isid; 1149 int reg_count = 0, prf_isid;
1227 1150
1228 if (!su_dev->se_dev_ptr) 1151 if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1229 return -ENODEV;
1230
1231 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1232 return len; 1152 return len;
1233 1153
1234 len += sprintf(page+len, "SPC-3 PR Registrations:\n"); 1154 len += sprintf(page+len, "SPC-3 PR Registrations:\n");
1235 1155
1236 spin_lock(&su_dev->t10_pr.registration_lock); 1156 spin_lock(&dev->t10_pr.registration_lock);
1237 list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, 1157 list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
1238 pr_reg_list) { 1158 pr_reg_list) {
1239 1159
1240 memset(buf, 0, 384); 1160 memset(buf, 0, 384);
@@ -1254,7 +1174,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1254 len += sprintf(page+len, "%s", buf); 1174 len += sprintf(page+len, "%s", buf);
1255 reg_count++; 1175 reg_count++;
1256 } 1176 }
1257 spin_unlock(&su_dev->t10_pr.registration_lock); 1177 spin_unlock(&dev->t10_pr.registration_lock);
1258 1178
1259 if (!reg_count) 1179 if (!reg_count)
1260 len += sprintf(page+len, "None\n"); 1180 len += sprintf(page+len, "None\n");
@@ -1264,22 +1184,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1264 1184
1265SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts); 1185SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
1266 1186
1267/*
1268 * res_pr_type
1269 */
1270static ssize_t target_core_dev_pr_show_attr_res_pr_type( 1187static ssize_t target_core_dev_pr_show_attr_res_pr_type(
1271 struct se_subsystem_dev *su_dev, 1188 struct se_device *dev, char *page)
1272 char *page)
1273{ 1189{
1274 struct se_device *dev;
1275 struct t10_pr_registration *pr_reg; 1190 struct t10_pr_registration *pr_reg;
1276 ssize_t len = 0; 1191 ssize_t len = 0;
1277 1192
1278 dev = su_dev->se_dev_ptr; 1193 if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1279 if (!dev)
1280 return -ENODEV;
1281
1282 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1283 return len; 1194 return len;
1284 1195
1285 spin_lock(&dev->dev_reservation_lock); 1196 spin_lock(&dev->dev_reservation_lock);
@@ -1298,19 +1209,12 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type(
1298 1209
1299SE_DEV_PR_ATTR_RO(res_pr_type); 1210SE_DEV_PR_ATTR_RO(res_pr_type);
1300 1211
1301/*
1302 * res_type
1303 */
1304static ssize_t target_core_dev_pr_show_attr_res_type( 1212static ssize_t target_core_dev_pr_show_attr_res_type(
1305 struct se_subsystem_dev *su_dev, 1213 struct se_device *dev, char *page)
1306 char *page)
1307{ 1214{
1308 ssize_t len = 0; 1215 ssize_t len = 0;
1309 1216
1310 if (!su_dev->se_dev_ptr) 1217 switch (dev->t10_pr.res_type) {
1311 return -ENODEV;
1312
1313 switch (su_dev->t10_pr.res_type) {
1314 case SPC3_PERSISTENT_RESERVATIONS: 1218 case SPC3_PERSISTENT_RESERVATIONS:
1315 len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); 1219 len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
1316 break; 1220 break;
@@ -1330,22 +1234,14 @@ static ssize_t target_core_dev_pr_show_attr_res_type(
1330 1234
1331SE_DEV_PR_ATTR_RO(res_type); 1235SE_DEV_PR_ATTR_RO(res_type);
1332 1236
1333/*
1334 * res_aptpl_active
1335 */
1336
1337static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( 1237static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
1338 struct se_subsystem_dev *su_dev, 1238 struct se_device *dev, char *page)
1339 char *page)
1340{ 1239{
1341 if (!su_dev->se_dev_ptr) 1240 if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1342 return -ENODEV;
1343
1344 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1345 return 0; 1241 return 0;
1346 1242
1347 return sprintf(page, "APTPL Bit Status: %s\n", 1243 return sprintf(page, "APTPL Bit Status: %s\n",
1348 (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); 1244 (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
1349} 1245}
1350 1246
1351SE_DEV_PR_ATTR_RO(res_aptpl_active); 1247SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1354,13 +1250,9 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
1354 * res_aptpl_metadata 1250 * res_aptpl_metadata
1355 */ 1251 */
1356static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( 1252static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
1357 struct se_subsystem_dev *su_dev, 1253 struct se_device *dev, char *page)
1358 char *page)
1359{ 1254{
1360 if (!su_dev->se_dev_ptr) 1255 if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1361 return -ENODEV;
1362
1363 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1364 return 0; 1256 return 0;
1365 1257
1366 return sprintf(page, "Ready to process PR APTPL metadata..\n"); 1258 return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1392,11 +1284,10 @@ static match_table_t tokens = {
1392}; 1284};
1393 1285
1394static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( 1286static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1395 struct se_subsystem_dev *su_dev, 1287 struct se_device *dev,
1396 const char *page, 1288 const char *page,
1397 size_t count) 1289 size_t count)
1398{ 1290{
1399 struct se_device *dev;
1400 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; 1291 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
1401 unsigned char *t_fabric = NULL, *t_port = NULL; 1292 unsigned char *t_fabric = NULL, *t_port = NULL;
1402 char *orig, *ptr, *arg_p, *opts; 1293 char *orig, *ptr, *arg_p, *opts;
@@ -1408,14 +1299,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1408 u16 port_rpti = 0, tpgt = 0; 1299 u16 port_rpti = 0, tpgt = 0;
1409 u8 type = 0, scope; 1300 u8 type = 0, scope;
1410 1301
1411 dev = su_dev->se_dev_ptr; 1302 if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1412 if (!dev)
1413 return -ENODEV;
1414
1415 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1416 return 0; 1303 return 0;
1417 1304
1418 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1305 if (dev->export_count) {
1419 pr_debug("Unable to process APTPL metadata while" 1306 pr_debug("Unable to process APTPL metadata while"
1420 " active fabric exports exist\n"); 1307 " active fabric exports exist\n");
1421 return -EINVAL; 1308 return -EINVAL;
@@ -1558,7 +1445,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1558 goto out; 1445 goto out;
1559 } 1446 }
1560 1447
1561 ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key, 1448 ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
1562 i_port, isid, mapped_lun, t_port, tpgt, target_lun, 1449 i_port, isid, mapped_lun, t_port, tpgt, target_lun,
1563 res_holder, all_tg_pt, type); 1450 res_holder, all_tg_pt, type);
1564out: 1451out:
@@ -1573,7 +1460,7 @@ out:
1573 1460
1574SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR); 1461SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
1575 1462
1576CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group); 1463CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);
1577 1464
1578static struct configfs_attribute *target_core_dev_pr_attrs[] = { 1465static struct configfs_attribute *target_core_dev_pr_attrs[] = {
1579 &target_core_dev_pr_res_holder.attr, 1466 &target_core_dev_pr_res_holder.attr,
@@ -1605,18 +1492,14 @@ static struct config_item_type target_core_dev_pr_cit = {
1605 1492
1606static ssize_t target_core_show_dev_info(void *p, char *page) 1493static ssize_t target_core_show_dev_info(void *p, char *page)
1607{ 1494{
1608 struct se_subsystem_dev *se_dev = p; 1495 struct se_device *dev = p;
1609 struct se_hba *hba = se_dev->se_dev_hba; 1496 struct se_subsystem_api *t = dev->transport;
1610 struct se_subsystem_api *t = hba->transport;
1611 int bl = 0; 1497 int bl = 0;
1612 ssize_t read_bytes = 0; 1498 ssize_t read_bytes = 0;
1613 1499
1614 if (!se_dev->se_dev_ptr) 1500 transport_dump_dev_state(dev, page, &bl);
1615 return -ENODEV;
1616
1617 transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
1618 read_bytes += bl; 1501 read_bytes += bl;
1619 read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes); 1502 read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
1620 return read_bytes; 1503 return read_bytes;
1621} 1504}
1622 1505
@@ -1633,17 +1516,10 @@ static ssize_t target_core_store_dev_control(
1633 const char *page, 1516 const char *page,
1634 size_t count) 1517 size_t count)
1635{ 1518{
1636 struct se_subsystem_dev *se_dev = p; 1519 struct se_device *dev = p;
1637 struct se_hba *hba = se_dev->se_dev_hba; 1520 struct se_subsystem_api *t = dev->transport;
1638 struct se_subsystem_api *t = hba->transport;
1639 1521
1640 if (!se_dev->se_dev_su_ptr) { 1522 return t->set_configfs_dev_params(dev, page, count);
1641 pr_err("Unable to locate struct se_subsystem_dev>se"
1642 "_dev_su_ptr\n");
1643 return -EINVAL;
1644 }
1645
1646 return t->set_configfs_dev_params(hba, se_dev, page, count);
1647} 1523}
1648 1524
1649static struct target_core_configfs_attribute target_core_attr_dev_control = { 1525static struct target_core_configfs_attribute target_core_attr_dev_control = {
@@ -1656,12 +1532,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
1656 1532
1657static ssize_t target_core_show_dev_alias(void *p, char *page) 1533static ssize_t target_core_show_dev_alias(void *p, char *page)
1658{ 1534{
1659 struct se_subsystem_dev *se_dev = p; 1535 struct se_device *dev = p;
1660 1536
1661 if (!(se_dev->su_dev_flags & SDF_USING_ALIAS)) 1537 if (!(dev->dev_flags & DF_USING_ALIAS))
1662 return 0; 1538 return 0;
1663 1539
1664 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias); 1540 return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
1665} 1541}
1666 1542
1667static ssize_t target_core_store_dev_alias( 1543static ssize_t target_core_store_dev_alias(
@@ -1669,8 +1545,8 @@ static ssize_t target_core_store_dev_alias(
1669 const char *page, 1545 const char *page,
1670 size_t count) 1546 size_t count)
1671{ 1547{
1672 struct se_subsystem_dev *se_dev = p; 1548 struct se_device *dev = p;
1673 struct se_hba *hba = se_dev->se_dev_hba; 1549 struct se_hba *hba = dev->se_hba;
1674 ssize_t read_bytes; 1550 ssize_t read_bytes;
1675 1551
1676 if (count > (SE_DEV_ALIAS_LEN-1)) { 1552 if (count > (SE_DEV_ALIAS_LEN-1)) {
@@ -1680,19 +1556,18 @@ static ssize_t target_core_store_dev_alias(
1680 return -EINVAL; 1556 return -EINVAL;
1681 } 1557 }
1682 1558
1683 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, 1559 read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
1684 "%s", page);
1685 if (!read_bytes) 1560 if (!read_bytes)
1686 return -EINVAL; 1561 return -EINVAL;
1687 if (se_dev->se_dev_alias[read_bytes - 1] == '\n') 1562 if (dev->dev_alias[read_bytes - 1] == '\n')
1688 se_dev->se_dev_alias[read_bytes - 1] = '\0'; 1563 dev->dev_alias[read_bytes - 1] = '\0';
1689 1564
1690 se_dev->su_dev_flags |= SDF_USING_ALIAS; 1565 dev->dev_flags |= DF_USING_ALIAS;
1691 1566
1692 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 1567 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
1693 config_item_name(&hba->hba_group.cg_item), 1568 config_item_name(&hba->hba_group.cg_item),
1694 config_item_name(&se_dev->se_dev_group.cg_item), 1569 config_item_name(&dev->dev_group.cg_item),
1695 se_dev->se_dev_alias); 1570 dev->dev_alias);
1696 1571
1697 return read_bytes; 1572 return read_bytes;
1698} 1573}
@@ -1707,12 +1582,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
1707 1582
1708static ssize_t target_core_show_dev_udev_path(void *p, char *page) 1583static ssize_t target_core_show_dev_udev_path(void *p, char *page)
1709{ 1584{
1710 struct se_subsystem_dev *se_dev = p; 1585 struct se_device *dev = p;
1711 1586
1712 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) 1587 if (!(dev->dev_flags & DF_USING_UDEV_PATH))
1713 return 0; 1588 return 0;
1714 1589
1715 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path); 1590 return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
1716} 1591}
1717 1592
1718static ssize_t target_core_store_dev_udev_path( 1593static ssize_t target_core_store_dev_udev_path(
@@ -1720,8 +1595,8 @@ static ssize_t target_core_store_dev_udev_path(
1720 const char *page, 1595 const char *page,
1721 size_t count) 1596 size_t count)
1722{ 1597{
1723 struct se_subsystem_dev *se_dev = p; 1598 struct se_device *dev = p;
1724 struct se_hba *hba = se_dev->se_dev_hba; 1599 struct se_hba *hba = dev->se_hba;
1725 ssize_t read_bytes; 1600 ssize_t read_bytes;
1726 1601
1727 if (count > (SE_UDEV_PATH_LEN-1)) { 1602 if (count > (SE_UDEV_PATH_LEN-1)) {
@@ -1731,19 +1606,19 @@ static ssize_t target_core_store_dev_udev_path(
1731 return -EINVAL; 1606 return -EINVAL;
1732 } 1607 }
1733 1608
1734 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, 1609 read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
1735 "%s", page); 1610 "%s", page);
1736 if (!read_bytes) 1611 if (!read_bytes)
1737 return -EINVAL; 1612 return -EINVAL;
1738 if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n') 1613 if (dev->udev_path[read_bytes - 1] == '\n')
1739 se_dev->se_dev_udev_path[read_bytes - 1] = '\0'; 1614 dev->udev_path[read_bytes - 1] = '\0';
1740 1615
1741 se_dev->su_dev_flags |= SDF_USING_UDEV_PATH; 1616 dev->dev_flags |= DF_USING_UDEV_PATH;
1742 1617
1743 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1618 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
1744 config_item_name(&hba->hba_group.cg_item), 1619 config_item_name(&hba->hba_group.cg_item),
1745 config_item_name(&se_dev->se_dev_group.cg_item), 1620 config_item_name(&dev->dev_group.cg_item),
1746 se_dev->se_dev_udev_path); 1621 dev->udev_path);
1747 1622
1748 return read_bytes; 1623 return read_bytes;
1749} 1624}
@@ -1761,11 +1636,9 @@ static ssize_t target_core_store_dev_enable(
1761 const char *page, 1636 const char *page,
1762 size_t count) 1637 size_t count)
1763{ 1638{
1764 struct se_subsystem_dev *se_dev = p; 1639 struct se_device *dev = p;
1765 struct se_device *dev;
1766 struct se_hba *hba = se_dev->se_dev_hba;
1767 struct se_subsystem_api *t = hba->transport;
1768 char *ptr; 1640 char *ptr;
1641 int ret;
1769 1642
1770 ptr = strstr(page, "1"); 1643 ptr = strstr(page, "1");
1771 if (!ptr) { 1644 if (!ptr) {
@@ -1773,25 +1646,10 @@ static ssize_t target_core_store_dev_enable(
1773 " is \"1\"\n"); 1646 " is \"1\"\n");
1774 return -EINVAL; 1647 return -EINVAL;
1775 } 1648 }
1776 if (se_dev->se_dev_ptr) {
1777 pr_err("se_dev->se_dev_ptr already set for storage"
1778 " object\n");
1779 return -EEXIST;
1780 }
1781
1782 if (t->check_configfs_dev_params(hba, se_dev) < 0)
1783 return -EINVAL;
1784
1785 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1786 if (IS_ERR(dev))
1787 return PTR_ERR(dev);
1788 else if (!dev)
1789 return -EINVAL;
1790
1791 se_dev->se_dev_ptr = dev;
1792 pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
1793 " %p\n", se_dev->se_dev_ptr);
1794 1649
1650 ret = target_configure_device(dev);
1651 if (ret)
1652 return ret;
1795 return count; 1653 return count;
1796} 1654}
1797 1655
@@ -1805,18 +1663,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1805 1663
1806static ssize_t target_core_show_alua_lu_gp(void *p, char *page) 1664static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1807{ 1665{
1808 struct se_device *dev; 1666 struct se_device *dev = p;
1809 struct se_subsystem_dev *su_dev = p;
1810 struct config_item *lu_ci; 1667 struct config_item *lu_ci;
1811 struct t10_alua_lu_gp *lu_gp; 1668 struct t10_alua_lu_gp *lu_gp;
1812 struct t10_alua_lu_gp_member *lu_gp_mem; 1669 struct t10_alua_lu_gp_member *lu_gp_mem;
1813 ssize_t len = 0; 1670 ssize_t len = 0;
1814 1671
1815 dev = su_dev->se_dev_ptr; 1672 if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
1816 if (!dev)
1817 return -ENODEV;
1818
1819 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
1820 return len; 1673 return len;
1821 1674
1822 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1675 lu_gp_mem = dev->dev_alua_lu_gp_mem;
@@ -1843,22 +1696,17 @@ static ssize_t target_core_store_alua_lu_gp(
1843 const char *page, 1696 const char *page,
1844 size_t count) 1697 size_t count)
1845{ 1698{
1846 struct se_device *dev; 1699 struct se_device *dev = p;
1847 struct se_subsystem_dev *su_dev = p; 1700 struct se_hba *hba = dev->se_hba;
1848 struct se_hba *hba = su_dev->se_dev_hba;
1849 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; 1701 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
1850 struct t10_alua_lu_gp_member *lu_gp_mem; 1702 struct t10_alua_lu_gp_member *lu_gp_mem;
1851 unsigned char buf[LU_GROUP_NAME_BUF]; 1703 unsigned char buf[LU_GROUP_NAME_BUF];
1852 int move = 0; 1704 int move = 0;
1853 1705
1854 dev = su_dev->se_dev_ptr; 1706 if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1855 if (!dev)
1856 return -ENODEV;
1857
1858 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1859 pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n", 1707 pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
1860 config_item_name(&hba->hba_group.cg_item), 1708 config_item_name(&hba->hba_group.cg_item),
1861 config_item_name(&su_dev->se_dev_group.cg_item)); 1709 config_item_name(&dev->dev_group.cg_item));
1862 return -EINVAL; 1710 return -EINVAL;
1863 } 1711 }
1864 if (count > LU_GROUP_NAME_BUF) { 1712 if (count > LU_GROUP_NAME_BUF) {
@@ -1902,7 +1750,7 @@ static ssize_t target_core_store_alua_lu_gp(
1902 " from ALUA LU Group: core/alua/lu_gps/%s, ID:" 1750 " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
1903 " %hu\n", 1751 " %hu\n",
1904 config_item_name(&hba->hba_group.cg_item), 1752 config_item_name(&hba->hba_group.cg_item),
1905 config_item_name(&su_dev->se_dev_group.cg_item), 1753 config_item_name(&dev->dev_group.cg_item),
1906 config_item_name(&lu_gp->lu_gp_group.cg_item), 1754 config_item_name(&lu_gp->lu_gp_group.cg_item),
1907 lu_gp->lu_gp_id); 1755 lu_gp->lu_gp_id);
1908 1756
@@ -1927,7 +1775,7 @@ static ssize_t target_core_store_alua_lu_gp(
1927 " core/alua/lu_gps/%s, ID: %hu\n", 1775 " core/alua/lu_gps/%s, ID: %hu\n",
1928 (move) ? "Moving" : "Adding", 1776 (move) ? "Moving" : "Adding",
1929 config_item_name(&hba->hba_group.cg_item), 1777 config_item_name(&hba->hba_group.cg_item),
1930 config_item_name(&su_dev->se_dev_group.cg_item), 1778 config_item_name(&dev->dev_group.cg_item),
1931 config_item_name(&lu_gp_new->lu_gp_group.cg_item), 1779 config_item_name(&lu_gp_new->lu_gp_group.cg_item),
1932 lu_gp_new->lu_gp_id); 1780 lu_gp_new->lu_gp_id);
1933 1781
@@ -1955,69 +1803,44 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
1955 1803
1956static void target_core_dev_release(struct config_item *item) 1804static void target_core_dev_release(struct config_item *item)
1957{ 1805{
1958 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 1806 struct config_group *dev_cg = to_config_group(item);
1959 struct se_subsystem_dev, se_dev_group); 1807 struct se_device *dev =
1960 struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 1808 container_of(dev_cg, struct se_device, dev_group);
1961 struct se_subsystem_api *t = hba->transport;
1962 struct config_group *dev_cg = &se_dev->se_dev_group;
1963 1809
1964 kfree(dev_cg->default_groups); 1810 kfree(dev_cg->default_groups);
1965 /* 1811 target_free_device(dev);
1966 * This pointer will set when the storage is enabled with:
1967 *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
1968 */
1969 if (se_dev->se_dev_ptr) {
1970 pr_debug("Target_Core_ConfigFS: Calling se_free_"
1971 "virtual_device() for se_dev_ptr: %p\n",
1972 se_dev->se_dev_ptr);
1973
1974 se_free_virtual_device(se_dev->se_dev_ptr, hba);
1975 } else {
1976 /*
1977 * Release struct se_subsystem_dev->se_dev_su_ptr..
1978 */
1979 pr_debug("Target_Core_ConfigFS: Calling t->free_"
1980 "device() for se_dev_su_ptr: %p\n",
1981 se_dev->se_dev_su_ptr);
1982
1983 t->free_device(se_dev->se_dev_su_ptr);
1984 }
1985
1986 pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
1987 "_dev_t: %p\n", se_dev);
1988 kfree(se_dev);
1989} 1812}
1990 1813
1991static ssize_t target_core_dev_show(struct config_item *item, 1814static ssize_t target_core_dev_show(struct config_item *item,
1992 struct configfs_attribute *attr, 1815 struct configfs_attribute *attr,
1993 char *page) 1816 char *page)
1994{ 1817{
1995 struct se_subsystem_dev *se_dev = container_of( 1818 struct config_group *dev_cg = to_config_group(item);
1996 to_config_group(item), struct se_subsystem_dev, 1819 struct se_device *dev =
1997 se_dev_group); 1820 container_of(dev_cg, struct se_device, dev_group);
1998 struct target_core_configfs_attribute *tc_attr = container_of( 1821 struct target_core_configfs_attribute *tc_attr = container_of(
1999 attr, struct target_core_configfs_attribute, attr); 1822 attr, struct target_core_configfs_attribute, attr);
2000 1823
2001 if (!tc_attr->show) 1824 if (!tc_attr->show)
2002 return -EINVAL; 1825 return -EINVAL;
2003 1826
2004 return tc_attr->show(se_dev, page); 1827 return tc_attr->show(dev, page);
2005} 1828}
2006 1829
2007static ssize_t target_core_dev_store(struct config_item *item, 1830static ssize_t target_core_dev_store(struct config_item *item,
2008 struct configfs_attribute *attr, 1831 struct configfs_attribute *attr,
2009 const char *page, size_t count) 1832 const char *page, size_t count)
2010{ 1833{
2011 struct se_subsystem_dev *se_dev = container_of( 1834 struct config_group *dev_cg = to_config_group(item);
2012 to_config_group(item), struct se_subsystem_dev, 1835 struct se_device *dev =
2013 se_dev_group); 1836 container_of(dev_cg, struct se_device, dev_group);
2014 struct target_core_configfs_attribute *tc_attr = container_of( 1837 struct target_core_configfs_attribute *tc_attr = container_of(
2015 attr, struct target_core_configfs_attribute, attr); 1838 attr, struct target_core_configfs_attribute, attr);
2016 1839
2017 if (!tc_attr->store) 1840 if (!tc_attr->store)
2018 return -EINVAL; 1841 return -EINVAL;
2019 1842
2020 return tc_attr->store(se_dev, page, count); 1843 return tc_attr->store(dev, page, count);
2021} 1844}
2022 1845
2023static struct configfs_item_operations target_core_dev_item_ops = { 1846static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2107,7 +1930,6 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
2107{ 1930{
2108 struct se_device *dev; 1931 struct se_device *dev;
2109 struct se_hba *hba; 1932 struct se_hba *hba;
2110 struct se_subsystem_dev *su_dev;
2111 struct t10_alua_lu_gp_member *lu_gp_mem; 1933 struct t10_alua_lu_gp_member *lu_gp_mem;
2112 ssize_t len = 0, cur_len; 1934 ssize_t len = 0, cur_len;
2113 unsigned char buf[LU_GROUP_NAME_BUF]; 1935 unsigned char buf[LU_GROUP_NAME_BUF];
@@ -2117,12 +1939,11 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
2117 spin_lock(&lu_gp->lu_gp_lock); 1939 spin_lock(&lu_gp->lu_gp_lock);
2118 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 1940 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
2119 dev = lu_gp_mem->lu_gp_mem_dev; 1941 dev = lu_gp_mem->lu_gp_mem_dev;
2120 su_dev = dev->se_sub_dev; 1942 hba = dev->se_hba;
2121 hba = su_dev->se_dev_hba;
2122 1943
2123 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", 1944 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
2124 config_item_name(&hba->hba_group.cg_item), 1945 config_item_name(&hba->hba_group.cg_item),
2125 config_item_name(&su_dev->se_dev_group.cg_item)); 1946 config_item_name(&dev->dev_group.cg_item));
2126 cur_len++; /* Extra byte for NULL terminator */ 1947 cur_len++; /* Extra byte for NULL terminator */
2127 1948
2128 if ((cur_len + len) > PAGE_SIZE) { 1949 if ((cur_len + len) > PAGE_SIZE) {
@@ -2260,7 +2081,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2260 const char *page, 2081 const char *page,
2261 size_t count) 2082 size_t count)
2262{ 2083{
2263 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 2084 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
2264 unsigned long tmp; 2085 unsigned long tmp;
2265 int new_state, ret; 2086 int new_state, ret;
2266 2087
@@ -2284,7 +2105,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2284 return -EINVAL; 2105 return -EINVAL;
2285 } 2106 }
2286 2107
2287 ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr, 2108 ret = core_alua_do_port_transition(tg_pt_gp, dev,
2288 NULL, NULL, new_state, 0); 2109 NULL, NULL, new_state, 0);
2289 return (!ret) ? count : -EINVAL; 2110 return (!ret) ? count : -EINVAL;
2290} 2111}
@@ -2620,11 +2441,10 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
2620 struct t10_alua *alua = container_of(group, struct t10_alua, 2441 struct t10_alua *alua = container_of(group, struct t10_alua,
2621 alua_tg_pt_gps_group); 2442 alua_tg_pt_gps_group);
2622 struct t10_alua_tg_pt_gp *tg_pt_gp; 2443 struct t10_alua_tg_pt_gp *tg_pt_gp;
2623 struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
2624 struct config_group *alua_tg_pt_gp_cg = NULL; 2444 struct config_group *alua_tg_pt_gp_cg = NULL;
2625 struct config_item *alua_tg_pt_gp_ci = NULL; 2445 struct config_item *alua_tg_pt_gp_ci = NULL;
2626 2446
2627 tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); 2447 tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
2628 if (!tg_pt_gp) 2448 if (!tg_pt_gp)
2629 return NULL; 2449 return NULL;
2630 2450
@@ -2721,10 +2541,10 @@ static struct config_group *target_core_make_subdev(
2721 const char *name) 2541 const char *name)
2722{ 2542{
2723 struct t10_alua_tg_pt_gp *tg_pt_gp; 2543 struct t10_alua_tg_pt_gp *tg_pt_gp;
2724 struct se_subsystem_dev *se_dev;
2725 struct se_subsystem_api *t; 2544 struct se_subsystem_api *t;
2726 struct config_item *hba_ci = &group->cg_item; 2545 struct config_item *hba_ci = &group->cg_item;
2727 struct se_hba *hba = item_to_hba(hba_ci); 2546 struct se_hba *hba = item_to_hba(hba_ci);
2547 struct se_device *dev;
2728 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; 2548 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
2729 struct config_group *dev_stat_grp = NULL; 2549 struct config_group *dev_stat_grp = NULL;
2730 int errno = -ENOMEM, ret; 2550 int errno = -ENOMEM, ret;
@@ -2737,120 +2557,80 @@ static struct config_group *target_core_make_subdev(
2737 */ 2557 */
2738 t = hba->transport; 2558 t = hba->transport;
2739 2559
2740 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 2560 dev = target_alloc_device(hba, name);
2741 if (!se_dev) { 2561 if (!dev)
2742 pr_err("Unable to allocate memory for" 2562 goto out_unlock;
2743 " struct se_subsystem_dev\n"); 2563
2744 goto unlock; 2564 dev_cg = &dev->dev_group;
2745 }
2746 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
2747 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
2748 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
2749 INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
2750 spin_lock_init(&se_dev->t10_pr.registration_lock);
2751 spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
2752 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
2753 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
2754 spin_lock_init(&se_dev->se_dev_lock);
2755 se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
2756 se_dev->t10_wwn.t10_sub_dev = se_dev;
2757 se_dev->t10_alua.t10_sub_dev = se_dev;
2758 se_dev->se_dev_attrib.da_sub_dev = se_dev;
2759
2760 se_dev->se_dev_hba = hba;
2761 dev_cg = &se_dev->se_dev_group;
2762 2565
2763 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, 2566 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
2764 GFP_KERNEL); 2567 GFP_KERNEL);
2765 if (!dev_cg->default_groups) 2568 if (!dev_cg->default_groups)
2766 goto out; 2569 goto out_free_device;
2767 /*
2768 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
2769 * for ->allocate_virtdevice()
2770 *
2771 * se_dev->se_dev_ptr will be set after ->create_virtdev()
2772 * has been called successfully in the next level up in the
2773 * configfs tree for device object's struct config_group.
2774 */
2775 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
2776 if (!se_dev->se_dev_su_ptr) {
2777 pr_err("Unable to locate subsystem dependent pointer"
2778 " from allocate_virtdevice()\n");
2779 goto out;
2780 }
2781 2570
2782 config_group_init_type_name(&se_dev->se_dev_group, name, 2571 config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
2783 &target_core_dev_cit); 2572 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
2784 config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
2785 &target_core_dev_attrib_cit); 2573 &target_core_dev_attrib_cit);
2786 config_group_init_type_name(&se_dev->se_dev_pr_group, "pr", 2574 config_group_init_type_name(&dev->dev_pr_group, "pr",
2787 &target_core_dev_pr_cit); 2575 &target_core_dev_pr_cit);
2788 config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn", 2576 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
2789 &target_core_dev_wwn_cit); 2577 &target_core_dev_wwn_cit);
2790 config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, 2578 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
2791 "alua", &target_core_alua_tg_pt_gps_cit); 2579 "alua", &target_core_alua_tg_pt_gps_cit);
2792 config_group_init_type_name(&se_dev->dev_stat_grps.stat_group, 2580 config_group_init_type_name(&dev->dev_stat_grps.stat_group,
2793 "statistics", &target_core_stat_cit); 2581 "statistics", &target_core_stat_cit);
2794 2582
2795 dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; 2583 dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
2796 dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; 2584 dev_cg->default_groups[1] = &dev->dev_pr_group;
2797 dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; 2585 dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
2798 dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; 2586 dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
2799 dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group; 2587 dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
2800 dev_cg->default_groups[5] = NULL; 2588 dev_cg->default_groups[5] = NULL;
2801 /* 2589 /*
2802 * Add core/$HBA/$DEV/alua/default_tg_pt_gp 2590 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
2803 */ 2591 */
2804 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); 2592 tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
2805 if (!tg_pt_gp) 2593 if (!tg_pt_gp)
2806 goto out; 2594 goto out_free_dev_cg_default_groups;
2595 dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
2807 2596
2808 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2597 tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
2809 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 2598 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
2810 GFP_KERNEL); 2599 GFP_KERNEL);
2811 if (!tg_pt_gp_cg->default_groups) { 2600 if (!tg_pt_gp_cg->default_groups) {
2812 pr_err("Unable to allocate tg_pt_gp_cg->" 2601 pr_err("Unable to allocate tg_pt_gp_cg->"
2813 "default_groups\n"); 2602 "default_groups\n");
2814 goto out; 2603 goto out_free_tg_pt_gp;
2815 } 2604 }
2816 2605
2817 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, 2606 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
2818 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); 2607 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
2819 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; 2608 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
2820 tg_pt_gp_cg->default_groups[1] = NULL; 2609 tg_pt_gp_cg->default_groups[1] = NULL;
2821 se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
2822 /* 2610 /*
2823 * Add core/$HBA/$DEV/statistics/ default groups 2611 * Add core/$HBA/$DEV/statistics/ default groups
2824 */ 2612 */
2825 dev_stat_grp = &se_dev->dev_stat_grps.stat_group; 2613 dev_stat_grp = &dev->dev_stat_grps.stat_group;
2826 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, 2614 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
2827 GFP_KERNEL); 2615 GFP_KERNEL);
2828 if (!dev_stat_grp->default_groups) { 2616 if (!dev_stat_grp->default_groups) {
2829 pr_err("Unable to allocate dev_stat_grp->default_groups\n"); 2617 pr_err("Unable to allocate dev_stat_grp->default_groups\n");
2830 goto out; 2618 goto out_free_tg_pt_gp_cg_default_groups;
2831 } 2619 }
2832 target_stat_setup_dev_default_groups(se_dev); 2620 target_stat_setup_dev_default_groups(dev);
2833
2834 pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
2835 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
2836 2621
2837 mutex_unlock(&hba->hba_access_mutex); 2622 mutex_unlock(&hba->hba_access_mutex);
2838 return &se_dev->se_dev_group; 2623 return dev_cg;
2839out: 2624
2840 if (se_dev->t10_alua.default_tg_pt_gp) { 2625out_free_tg_pt_gp_cg_default_groups:
2841 core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp); 2626 kfree(tg_pt_gp_cg->default_groups);
2842 se_dev->t10_alua.default_tg_pt_gp = NULL; 2627out_free_tg_pt_gp:
2843 } 2628 core_alua_free_tg_pt_gp(tg_pt_gp);
2844 if (dev_stat_grp) 2629out_free_dev_cg_default_groups:
2845 kfree(dev_stat_grp->default_groups); 2630 kfree(dev_cg->default_groups);
2846 if (tg_pt_gp_cg) 2631out_free_device:
2847 kfree(tg_pt_gp_cg->default_groups); 2632 target_free_device(dev);
2848 if (dev_cg) 2633out_unlock:
2849 kfree(dev_cg->default_groups);
2850 if (se_dev->se_dev_su_ptr)
2851 t->free_device(se_dev->se_dev_su_ptr);
2852 kfree(se_dev);
2853unlock:
2854 mutex_unlock(&hba->hba_access_mutex); 2634 mutex_unlock(&hba->hba_access_mutex);
2855 return ERR_PTR(errno); 2635 return ERR_PTR(errno);
2856} 2636}
@@ -2859,18 +2639,19 @@ static void target_core_drop_subdev(
2859 struct config_group *group, 2639 struct config_group *group,
2860 struct config_item *item) 2640 struct config_item *item)
2861{ 2641{
2862 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 2642 struct config_group *dev_cg = to_config_group(item);
2863 struct se_subsystem_dev, se_dev_group); 2643 struct se_device *dev =
2644 container_of(dev_cg, struct se_device, dev_group);
2864 struct se_hba *hba; 2645 struct se_hba *hba;
2865 struct config_item *df_item; 2646 struct config_item *df_item;
2866 struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp; 2647 struct config_group *tg_pt_gp_cg, *dev_stat_grp;
2867 int i; 2648 int i;
2868 2649
2869 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 2650 hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
2870 2651
2871 mutex_lock(&hba->hba_access_mutex); 2652 mutex_lock(&hba->hba_access_mutex);
2872 2653
2873 dev_stat_grp = &se_dev->dev_stat_grps.stat_group; 2654 dev_stat_grp = &dev->dev_stat_grps.stat_group;
2874 for (i = 0; dev_stat_grp->default_groups[i]; i++) { 2655 for (i = 0; dev_stat_grp->default_groups[i]; i++) {
2875 df_item = &dev_stat_grp->default_groups[i]->cg_item; 2656 df_item = &dev_stat_grp->default_groups[i]->cg_item;
2876 dev_stat_grp->default_groups[i] = NULL; 2657 dev_stat_grp->default_groups[i] = NULL;
@@ -2878,7 +2659,7 @@ static void target_core_drop_subdev(
2878 } 2659 }
2879 kfree(dev_stat_grp->default_groups); 2660 kfree(dev_stat_grp->default_groups);
2880 2661
2881 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2662 tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
2882 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { 2663 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
2883 df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; 2664 df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
2884 tg_pt_gp_cg->default_groups[i] = NULL; 2665 tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2889,17 +2670,15 @@ static void target_core_drop_subdev(
2889 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp 2670 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
2890 * directly from target_core_alua_tg_pt_gp_release(). 2671 * directly from target_core_alua_tg_pt_gp_release().
2891 */ 2672 */
2892 se_dev->t10_alua.default_tg_pt_gp = NULL; 2673 dev->t10_alua.default_tg_pt_gp = NULL;
2893 2674
2894 dev_cg = &se_dev->se_dev_group;
2895 for (i = 0; dev_cg->default_groups[i]; i++) { 2675 for (i = 0; dev_cg->default_groups[i]; i++) {
2896 df_item = &dev_cg->default_groups[i]->cg_item; 2676 df_item = &dev_cg->default_groups[i]->cg_item;
2897 dev_cg->default_groups[i] = NULL; 2677 dev_cg->default_groups[i] = NULL;
2898 config_item_put(df_item); 2678 config_item_put(df_item);
2899 } 2679 }
2900 /* 2680 /*
2901 * The releasing of se_dev and associated se_dev->se_dev_ptr is done 2681 * se_dev is released from target_core_dev_item_ops->release()
2902 * from target_core_dev_item_ops->release() ->target_core_dev_release().
2903 */ 2682 */
2904 config_item_put(item); 2683 config_item_put(item);
2905 mutex_unlock(&hba->hba_access_mutex); 2684 mutex_unlock(&hba->hba_access_mutex);
@@ -2962,13 +2741,10 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
2962 return -EINVAL; 2741 return -EINVAL;
2963 } 2742 }
2964 2743
2965 spin_lock(&hba->device_lock); 2744 if (hba->dev_count) {
2966 if (!list_empty(&hba->hba_dev_list)) {
2967 pr_err("Unable to set hba_mode with active devices\n"); 2745 pr_err("Unable to set hba_mode with active devices\n");
2968 spin_unlock(&hba->device_lock);
2969 return -EINVAL; 2746 return -EINVAL;
2970 } 2747 }
2971 spin_unlock(&hba->device_lock);
2972 2748
2973 ret = transport->pmode_enable_hba(hba, mode_flag); 2749 ret = transport->pmode_enable_hba(hba, mode_flag);
2974 if (ret < 0) 2750 if (ret < 0)