path: root/drivers/target/target_core_device.c
Diffstat (limited to 'drivers/target/target_core_device.c')
-rw-r--r--  drivers/target/target_core_device.c  84
1 file changed, 8 insertions, 76 deletions
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index aa6267746383..5ad972856a8d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -643,9 +643,8 @@ void core_dev_unexport(
 	lun->lun_se_dev = NULL;
 }
 
-int target_report_luns(struct se_task *se_task)
+int target_report_luns(struct se_cmd *se_cmd)
 {
-	struct se_cmd *se_cmd = se_task->task_se_cmd;
 	struct se_dev_entry *deve;
 	struct se_session *se_sess = se_cmd->se_sess;
 	unsigned char *buf;
@@ -696,8 +695,7 @@ done:
 	buf[3] = (lun_count & 0xff);
 	transport_kunmap_data_sg(se_cmd);
 
-	se_task->task_scsi_status = GOOD;
-	transport_complete_task(se_task, 1);
+	target_complete_cmd(se_cmd, GOOD);
 	return 0;
 }
 
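For context, the hunk above collapses the old two-step task completion into a single call on the command itself. A minimal sketch of an emulated-command handler under the new convention (the handler name and payload step are hypothetical; only target_complete_cmd() and the GOOD status come from the diff):

static int example_emulate_handler(struct se_cmd *se_cmd)
{
	/*
	 * Illustrative sketch only: build the response payload into the
	 * command's mapped data buffer, then report SCSI status directly
	 * on the se_cmd instead of setting se_task->task_scsi_status and
	 * calling transport_complete_task().
	 */
	target_complete_cmd(se_cmd, GOOD);
	return 0;
}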
@@ -878,15 +876,12 @@ void se_dev_set_default_attribs(
 	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
 	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
 	/*
-	 * max_sectors is based on subsystem plugin dependent requirements.
+	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
 	 */
-	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
-	/*
-	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-	 */
-	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+	limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
 				limits->logical_block_size);
-	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
+	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+
 	/*
 	 * Set fabric_max_sectors, which is reported in block limits
 	 * VPD page (B0h).
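The alignment referred to in the comment above rounds the sector limit down so that the largest transfer is a whole multiple of PAGE_SIZE. A sketch of that arithmetic, assuming se_dev_align_max_sectors() behaves as the comment describes (the helper's actual body is not shown in this diff, and the function name below is hypothetical):

static u32 align_max_sectors_sketch(u32 max_sectors, u32 block_size)
{
	/* Round the byte count down to a PAGE_SIZE boundary ... */
	u32 aligned_bytes = rounddown(max_sectors * block_size, PAGE_SIZE);

	/* ... and convert back to a sector count in block_size units. */
	return aligned_bytes / block_size;
}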
@@ -1170,64 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 	return 0;
 }
 
-int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
-{
-	int force = 0; /* Force setting for VDEVS */
-
-	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		pr_err("dev[%p]: Unable to change SE Device"
-			" max_sectors while dev_export_obj: %d count exists\n",
-			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
-		return -EINVAL;
-	}
-	if (!max_sectors) {
-		pr_err("dev[%p]: Illegal ZERO value for"
-			" max_sectors\n", dev);
-		return -EINVAL;
-	}
-	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-		pr_err("dev[%p]: Passed max_sectors: %u less than"
-			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
-			DA_STATUS_MAX_SECTORS_MIN);
-		return -EINVAL;
-	}
-	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
-			pr_err("dev[%p]: Passed max_sectors: %u"
-				" greater than TCM/SE_Device max_sectors:"
-				" %u\n", dev, max_sectors,
-				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-			return -EINVAL;
-		}
-	} else {
-		if (!force && (max_sectors >
-				dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
-			pr_err("dev[%p]: Passed max_sectors: %u"
-				" greater than TCM/SE_Device max_sectors"
-				": %u, use force=1 to override.\n", dev,
-				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-			return -EINVAL;
-		}
-		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-			pr_err("dev[%p]: Passed max_sectors: %u"
-				" greater than DA_STATUS_MAX_SECTORS_MAX:"
-				" %u\n", dev, max_sectors,
-				DA_STATUS_MAX_SECTORS_MAX);
-			return -EINVAL;
-		}
-	}
-	/*
-	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-	 */
-	max_sectors = se_dev_align_max_sectors(max_sectors,
-			dev->se_sub_dev->se_dev_attrib.block_size);
-
-	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
-	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
-			dev, max_sectors);
-	return 0;
-}
-
 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 {
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
@@ -1341,7 +1278,6 @@ struct se_lun *core_dev_add_lun(
 	u32 lun)
 {
 	struct se_lun *lun_p;
-	u32 lun_access = 0;
 	int rc;
 
 	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
@@ -1354,12 +1290,8 @@ struct se_lun *core_dev_add_lun(
 	if (IS_ERR(lun_p))
 		return lun_p;
 
-	if (dev->dev_flags & DF_READ_ONLY)
-		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
-	else
-		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
-
-	rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
+	rc = core_tpg_post_addlun(tpg, lun_p,
+				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
 	if (rc < 0)
 		return ERR_PTR(rc);
 