-rw-r--r--  drivers/target/Makefile                   1
-rw-r--r--  drivers/target/target_core_file.c         1
-rw-r--r--  drivers/target/target_core_iblock.c       1
-rw-r--r--  drivers/target/target_core_internal.h     3
-rw-r--r--  drivers/target/target_core_pscsi.c      470
-rw-r--r--  drivers/target/target_core_rd.c           1
-rw-r--r--  drivers/target/target_core_sbc.c        450
-rw-r--r--  drivers/target/target_core_spc.c          1
-rw-r--r--  drivers/target/target_core_transport.c  736
-rw-r--r--  include/target/target_core_backend.h      5
10 files changed, 936 insertions(+), 733 deletions(-)
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 70cab2a138d1..50b887b349c0 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -10,6 +10,7 @@ target_core_mod-y := target_core_configfs.o \
 	target_core_tpg.o \
 	target_core_transport.o \
 	target_core_cdb.o \
+	target_core_sbc.o \
 	target_core_spc.o \
 	target_core_ua.o \
 	target_core_rd.o \
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 9f99d0404908..e2df30867b13 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -561,6 +561,7 @@ static struct se_subsystem_api fileio_template = {
 	.allocate_virtdevice	= fd_allocate_virtdevice,
 	.create_virtdevice	= fd_create_virtdevice,
 	.free_device		= fd_free_device,
+	.parse_cdb		= sbc_parse_cdb,
 	.execute_cmd		= fd_execute_cmd,
 	.do_sync_cache		= fd_emulate_sync_cache,
 	.check_configfs_dev_params = fd_check_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index fd47950727b4..244fff4aaf5a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -653,6 +653,7 @@ static struct se_subsystem_api iblock_template = {
 	.allocate_virtdevice	= iblock_allocate_virtdevice,
 	.create_virtdevice	= iblock_create_virtdevice,
 	.free_device		= iblock_free_device,
+	.parse_cdb		= sbc_parse_cdb,
 	.execute_cmd		= iblock_execute_cmd,
 	.do_discard		= iblock_do_discard,
 	.do_sync_cache		= iblock_emulate_sync_cache,
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 6bbf18dc9e0d..165e82429687 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -96,9 +96,6 @@ int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
 struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
 int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
 
-/* target_core_spc.c */
-int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size, bool passthrough);
-
 /* target_core_transport.c */
 extern struct kmem_cache *se_tmr_req_cache;
 
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index bfc72327370c..378da242d841 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -35,8 +35,10 @@
 #include <linux/spinlock.h>
 #include <linux/genhd.h>
 #include <linux/cdrom.h>
-#include <linux/file.h>
+#include <linux/ratelimit.h>
 #include <linux/module.h>
+#include <asm/unaligned.h>
+
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
@@ -46,6 +48,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 
+#include "target_core_alua.h"
 #include "target_core_pscsi.h"
 
 #define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
@@ -1019,6 +1022,470 @@ fail:
 	return -ENOMEM;
 }
 
1025static inline u32 pscsi_get_sectors_6(
1026 unsigned char *cdb,
1027 struct se_cmd *cmd,
1028 int *ret)
1029{
1030 struct se_device *dev = cmd->se_dev;
1031
1032 /*
1033 * Assume TYPE_DISK for non struct se_device objects.
1034 * Use 8-bit sector value.
1035 */
1036 if (!dev)
1037 goto type_disk;
1038
1039 /*
1040 * Use 24-bit allocation length for TYPE_TAPE.
1041 */
1042 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
1043 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
1044
1045 /*
1046 * Everything else assume TYPE_DISK Sector CDB location.
1047 * Use 8-bit sector value. SBC-3 says:
1048 *
1049 * A TRANSFER LENGTH field set to zero specifies that 256
1050 * logical blocks shall be written. Any other value
1051 * specifies the number of logical blocks that shall be
1052 * written.
1053 */
1054type_disk:
1055 return cdb[4] ? : 256;
1056}
1057
1058static inline u32 pscsi_get_sectors_10(
1059 unsigned char *cdb,
1060 struct se_cmd *cmd,
1061 int *ret)
1062{
1063 struct se_device *dev = cmd->se_dev;
1064
1065 /*
1066 * Assume TYPE_DISK for non struct se_device objects.
1067 * Use 16-bit sector value.
1068 */
1069 if (!dev)
1070 goto type_disk;
1071
1072 /*
1073 * XXX_10 is not defined in SSC, throw an exception
1074 */
1075 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
1076 *ret = -EINVAL;
1077 return 0;
1078 }
1079
1080 /*
1081 * Everything else assume TYPE_DISK Sector CDB location.
1082 * Use 16-bit sector value.
1083 */
1084type_disk:
1085 return (u32)(cdb[7] << 8) + cdb[8];
1086}
1087
1088static inline u32 pscsi_get_sectors_12(
1089 unsigned char *cdb,
1090 struct se_cmd *cmd,
1091 int *ret)
1092{
1093 struct se_device *dev = cmd->se_dev;
1094
1095 /*
1096 * Assume TYPE_DISK for non struct se_device objects.
1097 * Use 32-bit sector value.
1098 */
1099 if (!dev)
1100 goto type_disk;
1101
1102 /*
1103 * XXX_12 is not defined in SSC, throw an exception
1104 */
1105 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
1106 *ret = -EINVAL;
1107 return 0;
1108 }
1109
1110 /*
1111 * Everything else assume TYPE_DISK Sector CDB location.
1112 * Use 32-bit sector value.
1113 */
1114type_disk:
1115 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
1116}
1117
1118static inline u32 pscsi_get_sectors_16(
1119 unsigned char *cdb,
1120 struct se_cmd *cmd,
1121 int *ret)
1122{
1123 struct se_device *dev = cmd->se_dev;
1124
1125 /*
1126 * Assume TYPE_DISK for non struct se_device objects.
1127 * Use 32-bit sector value.
1128 */
1129 if (!dev)
1130 goto type_disk;
1131
1132 /*
1133 * Use 24-bit allocation length for TYPE_TAPE.
1134 */
1135 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
1136 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
1137
1138type_disk:
1139 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
1140 (cdb[12] << 8) + cdb[13];
1141}
1142
1143/*
1144 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
1145 */
1146static inline u32 pscsi_get_sectors_32(
1147 unsigned char *cdb,
1148 struct se_cmd *cmd,
1149 int *ret)
1150{
1151 /*
1152 * Assume TYPE_DISK for non struct se_device objects.
1153 * Use 32-bit sector value.
1154 */
1155 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
1156 (cdb[30] << 8) + cdb[31];
1157
1158}
1159
1160static inline u32 pscsi_get_lba_21(unsigned char *cdb)
1161{
1162 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1163}
1164
1165static inline u32 pscsi_get_lba_32(unsigned char *cdb)
1166{
1167 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1168}
1169
1170static inline unsigned long long pscsi_get_lba_64(unsigned char *cdb)
1171{
1172 unsigned int __v1, __v2;
1173
1174 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1175 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1176
1177 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1178}
1179
1180/*
1181 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
1182 */
1183static inline unsigned long long pscsi_get_lba_64_ext(unsigned char *cdb)
1184{
1185 unsigned int __v1, __v2;
1186
1187 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1188 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1189
1190 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1191}
1192
1193
1194static inline u32 pscsi_get_size(
1195 u32 sectors,
1196 unsigned char *cdb,
1197 struct se_cmd *cmd)
1198{
1199 struct se_device *dev = cmd->se_dev;
1200
1201 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
1202 if (cdb[1] & 1) { /* sectors */
1203 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
1204 } else /* bytes */
1205 return sectors;
1206 }
1207
1208 pr_debug("Returning block_size: %u, sectors: %u == %u for"
1209 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
1210 sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
1211 dev->transport->name);
1212
1213 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
1214}
1215
1216static int pscsi_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1217{
1218 struct se_device *dev = cmd->se_dev;
1219 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1220 unsigned char *cdb = cmd->t_task_cdb;
1221 int sector_ret = 0;
1222 u32 sectors = 0;
1223 u16 service_action;
1224 int ret;
1225
1226 if (cmd->se_cmd_flags & SCF_BIDI)
1227 goto out_unsupported_cdb;
1228
1229 switch (cdb[0]) {
1230 case READ_6:
1231 sectors = pscsi_get_sectors_6(cdb, cmd, &sector_ret);
1232 if (sector_ret)
1233 goto out_unsupported_cdb;
1234 *size = pscsi_get_size(sectors, cdb, cmd);
1235 cmd->t_task_lba = pscsi_get_lba_21(cdb);
1236 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1237 break;
1238 case READ_10:
1239 sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret);
1240 if (sector_ret)
1241 goto out_unsupported_cdb;
1242 *size = pscsi_get_size(sectors, cdb, cmd);
1243 cmd->t_task_lba = pscsi_get_lba_32(cdb);
1244 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1245 break;
1246 case READ_12:
1247 sectors = pscsi_get_sectors_12(cdb, cmd, &sector_ret);
1248 if (sector_ret)
1249 goto out_unsupported_cdb;
1250 *size = pscsi_get_size(sectors, cdb, cmd);
1251 cmd->t_task_lba = pscsi_get_lba_32(cdb);
1252 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1253 break;
1254 case READ_16:
1255 sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret);
1256 if (sector_ret)
1257 goto out_unsupported_cdb;
1258 *size = pscsi_get_size(sectors, cdb, cmd);
1259 cmd->t_task_lba = pscsi_get_lba_64(cdb);
1260 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1261 break;
1262 case WRITE_6:
1263 sectors = pscsi_get_sectors_6(cdb, cmd, &sector_ret);
1264 if (sector_ret)
1265 goto out_unsupported_cdb;
1266 *size = pscsi_get_size(sectors, cdb, cmd);
1267 cmd->t_task_lba = pscsi_get_lba_21(cdb);
1268 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1269 break;
1270 case WRITE_10:
1271 case WRITE_VERIFY:
1272 sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret);
1273 if (sector_ret)
1274 goto out_unsupported_cdb;
1275 *size = pscsi_get_size(sectors, cdb, cmd);
1276 cmd->t_task_lba = pscsi_get_lba_32(cdb);
1277 if (cdb[1] & 0x8)
1278 cmd->se_cmd_flags |= SCF_FUA;
1279 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1280 break;
1281 case WRITE_12:
1282 sectors = pscsi_get_sectors_12(cdb, cmd, &sector_ret);
1283 if (sector_ret)
1284 goto out_unsupported_cdb;
1285 *size = pscsi_get_size(sectors, cdb, cmd);
1286 cmd->t_task_lba = pscsi_get_lba_32(cdb);
1287 if (cdb[1] & 0x8)
1288 cmd->se_cmd_flags |= SCF_FUA;
1289 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1290 break;
1291 case WRITE_16:
1292 sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret);
1293 if (sector_ret)
1294 goto out_unsupported_cdb;
1295 *size = pscsi_get_size(sectors, cdb, cmd);
1296 cmd->t_task_lba = pscsi_get_lba_64(cdb);
1297 if (cdb[1] & 0x8)
1298 cmd->se_cmd_flags |= SCF_FUA;
1299 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1300 break;
1301 case VARIABLE_LENGTH_CMD:
1302 service_action = get_unaligned_be16(&cdb[8]);
1303 switch (service_action) {
1304 case WRITE_SAME_32:
1305 sectors = pscsi_get_sectors_32(cdb, cmd, &sector_ret);
1306 if (sector_ret)
1307 goto out_unsupported_cdb;
1308
1309 if (!sectors) {
1310 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
1311 " supported\n");
1312 goto out_invalid_cdb_field;
1313 }
1314
1315 *size = pscsi_get_size(1, cdb, cmd);
1316 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
1317 break;
1318 default:
1319 pr_err("VARIABLE_LENGTH_CMD service action"
1320 " 0x%04x not supported\n", service_action);
1321 goto out_unsupported_cdb;
1322 }
1323 break;
1324 case GPCMD_READ_BUFFER_CAPACITY:
1325 case GPCMD_SEND_OPC:
1326 *size = (cdb[7] << 8) + cdb[8];
1327 break;
1328 case READ_BLOCK_LIMITS:
1329 *size = READ_BLOCK_LEN;
1330 break;
1331 case GPCMD_GET_CONFIGURATION:
1332 case GPCMD_READ_FORMAT_CAPACITIES:
1333 case GPCMD_READ_DISC_INFO:
1334 case GPCMD_READ_TRACK_RZONE_INFO:
1335 *size = (cdb[7] << 8) + cdb[8];
1336 break;
1337 case GPCMD_MECHANISM_STATUS:
1338 case GPCMD_READ_DVD_STRUCTURE:
1339 *size = (cdb[8] << 8) + cdb[9];
1340 break;
1341 case READ_POSITION:
1342 *size = READ_POSITION_LEN;
1343 break;
1344 case READ_BUFFER:
1345 *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
1346 break;
1347 case READ_CAPACITY:
1348 *size = READ_CAP_LEN;
1349 break;
1350 case READ_MEDIA_SERIAL_NUMBER:
1351 case SERVICE_ACTION_IN:
1352 case ACCESS_CONTROL_IN:
1353 case ACCESS_CONTROL_OUT:
1354 *size = (cdb[10] << 24) | (cdb[11] << 16) |
1355 (cdb[12] << 8) | cdb[13];
1356 break;
1357 case READ_TOC:
1358 *size = cdb[8];
1359 break;
1360 case READ_ELEMENT_STATUS:
1361 *size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
1362 break;
1363 case SYNCHRONIZE_CACHE:
1364 case SYNCHRONIZE_CACHE_16:
1365 /*
1366 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
1367 */
1368 if (cdb[0] == SYNCHRONIZE_CACHE) {
1369 sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret);
1370 cmd->t_task_lba = pscsi_get_lba_32(cdb);
1371 } else {
1372 sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret);
1373 cmd->t_task_lba = pscsi_get_lba_64(cdb);
1374 }
1375 if (sector_ret)
1376 goto out_unsupported_cdb;
1377
1378 *size = pscsi_get_size(sectors, cdb, cmd);
1379 break;
1380 case UNMAP:
1381 *size = get_unaligned_be16(&cdb[7]);
1382 break;
1383 case WRITE_SAME_16:
1384 sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret);
1385 if (sector_ret)
1386 goto out_unsupported_cdb;
1387
1388 if (!sectors) {
1389 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
1390 goto out_invalid_cdb_field;
1391 }
1392
1393 *size = pscsi_get_size(1, cdb, cmd);
1394 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
1395 break;
1396 case WRITE_SAME:
1397 sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret);
1398 if (sector_ret)
1399 goto out_unsupported_cdb;
1400
1401 if (!sectors) {
1402 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
1403 goto out_invalid_cdb_field;
1404 }
1405
1406 *size = pscsi_get_size(1, cdb, cmd);
1407 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
1408 break;
1409 case ALLOW_MEDIUM_REMOVAL:
1410 case ERASE:
1411 case REZERO_UNIT:
1412 case SEEK_10:
1413 case SPACE:
1414 case START_STOP:
1415 case VERIFY:
1416 case WRITE_FILEMARKS:
1417 case GPCMD_CLOSE_TRACK:
1418 case INITIALIZE_ELEMENT_STATUS:
1419 case GPCMD_LOAD_UNLOAD:
1420 case GPCMD_SET_SPEED:
1421 case MOVE_MEDIUM:
1422 *size = 0;
1423 break;
1424 case GET_EVENT_STATUS_NOTIFICATION:
1425 *size = (cdb[7] << 8) | cdb[8];
1426 break;
1427 case ATA_16:
1428 switch (cdb[2] & 0x3) { /* T_LENGTH */
1429 case 0x0:
1430 sectors = 0;
1431 break;
1432 case 0x1:
1433 sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
1434 break;
1435 case 0x2:
1436 sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
1437 break;
1438 case 0x3:
1439 pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
1440 goto out_invalid_cdb_field;
1441 }
1442
1443 /* BYTE_BLOCK */
1444 if (cdb[2] & 0x4) {
1445 /* BLOCK T_TYPE: 512 or sector */
1446 *size = sectors * ((cdb[2] & 0x10) ?
1447 dev->se_sub_dev->se_dev_attrib.block_size : 512);
1448 } else {
1449 /* BYTE */
1450 *size = sectors;
1451 }
1452 break;
1453 default:
1454 ret = spc_parse_cdb(cmd, size, true);
1455 if (ret)
1456 return ret;
1457 }
1458
1459 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1460 if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
1461 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
1462 " big sectors %u exceeds fabric_max_sectors:"
1463 " %u\n", cdb[0], sectors,
1464 su_dev->se_dev_attrib.fabric_max_sectors);
1465 goto out_invalid_cdb_field;
1466 }
1467 if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
1468 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
1469 " big sectors %u exceeds backend hw_max_sectors:"
1470 " %u\n", cdb[0], sectors,
1471 su_dev->se_dev_attrib.hw_max_sectors);
1472 goto out_invalid_cdb_field;
1473 }
1474 }
1475
1476 return 0;
1477
1478out_unsupported_cdb:
1479 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1480 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1481 return -EINVAL;
1482out_invalid_cdb_field:
1483 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1484 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1485 return -EINVAL;
1486}
1487
1488
 static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
 	u32 sgl_nents, enum dma_data_direction data_direction)
 {
@@ -1188,6 +1655,7 @@ static struct se_subsystem_api pscsi_template = {
 	.create_virtdevice	= pscsi_create_virtdevice,
 	.free_device		= pscsi_free_device,
 	.transport_complete	= pscsi_transport_complete,
+	.parse_cdb		= pscsi_parse_cdb,
 	.execute_cmd		= pscsi_execute_cmd,
 	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
 	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
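
The ATA_16 branch of pscsi_parse_cdb() above derives the transfer size from the T_LENGTH, BYTE_BLOCK and T_TYPE bits of the CDB. A minimal user-space sketch of that sizing logic follows; it is not part of the patch, and the ata16_xfer_bytes() helper name and the sample CDB values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the ATA_16 sizing switch in pscsi_parse_cdb() (illustrative only). */
static uint32_t ata16_xfer_bytes(const uint8_t *cdb, uint32_t dev_block_size)
{
	uint32_t sectors = 0;

	switch (cdb[2] & 0x3) {			/* T_LENGTH: where the count lives */
	case 0x0:
		sectors = 0;
		break;
	case 0x1:				/* count in the FEATURES field */
		sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
		break;
	case 0x2:				/* count in the SECTOR_COUNT field */
		sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
		break;
	}

	if (cdb[2] & 0x4)			/* BYTE_BLOCK: count is in blocks */
		return sectors * ((cdb[2] & 0x10) ? dev_block_size : 512);
	return sectors;				/* otherwise the count is already in bytes */
}

int main(void)
{
	/* EXTEND=1, T_LENGTH=0x2 (SECTOR_COUNT), BYTE_BLOCK=1, count=8 */
	uint8_t cdb[16] = { 0x85, 0x01, 0x06, 0x00, 0x00, 0x00, 0x08 };

	printf("%u\n", ata16_xfer_bytes(cdb, 4096));	/* prints 4096 (8 * 512) */
	return 0;
}
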
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index d0ceb873c0e5..d7e838287d89 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -468,6 +468,7 @@ static struct se_subsystem_api rd_mcp_template = {
 	.allocate_virtdevice	= rd_allocate_virtdevice,
 	.create_virtdevice	= rd_create_virtdevice,
 	.free_device		= rd_free_device,
+	.parse_cdb		= sbc_parse_cdb,
 	.execute_cmd		= rd_execute_cmd,
 	.check_configfs_dev_params = rd_check_configfs_dev_params,
 	.set_configfs_dev_params = rd_set_configfs_dev_params,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
new file mode 100644
index 000000000000..9d1ca3814876
--- /dev/null
+++ b/drivers/target/target_core_sbc.c
@@ -0,0 +1,450 @@
1/*
2 * SCSI Block Commands (SBC) parsing and emulation.
3 *
4 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
5 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
6 * Copyright (c) 2007-2010 Rising Tide Systems
7 * Copyright (c) 2008-2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/ratelimit.h>
29#include <asm/unaligned.h>
30#include <scsi/scsi.h>
31
32#include <target/target_core_base.h>
33#include <target/target_core_backend.h>
34#include <target/target_core_fabric.h>
35
36#include "target_core_internal.h"
37#include "target_core_ua.h"
38
39
40static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
41{
42 return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
43}
44
45static int sbc_check_valid_sectors(struct se_cmd *cmd)
46{
47 struct se_device *dev = cmd->se_dev;
48 unsigned long long end_lba;
49 u32 sectors;
50
51 sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
52 end_lba = dev->transport->get_blocks(dev) + 1;
53
54 if (cmd->t_task_lba + sectors > end_lba) {
55 pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
56 cmd->t_task_lba, sectors, end_lba);
57 return -EINVAL;
58 }
59
60 return 0;
61}
62
63static inline u32 transport_get_sectors_6(unsigned char *cdb)
64{
65 /*
66 * Use 8-bit sector value. SBC-3 says:
67 *
68 * A TRANSFER LENGTH field set to zero specifies that 256
69 * logical blocks shall be written. Any other value
70 * specifies the number of logical blocks that shall be
71 * written.
72 */
73 return cdb[4] ? : 256;
74}
75
76static inline u32 transport_get_sectors_10(unsigned char *cdb)
77{
78 return (u32)(cdb[7] << 8) + cdb[8];
79}
80
81static inline u32 transport_get_sectors_12(unsigned char *cdb)
82{
83 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
84}
85
86static inline u32 transport_get_sectors_16(unsigned char *cdb)
87{
88 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
89 (cdb[12] << 8) + cdb[13];
90}
91
92/*
93 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
94 */
95static inline u32 transport_get_sectors_32(unsigned char *cdb)
96{
97 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
98 (cdb[30] << 8) + cdb[31];
99
100}
101
102static inline u32 transport_lba_21(unsigned char *cdb)
103{
104 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
105}
106
107static inline u32 transport_lba_32(unsigned char *cdb)
108{
109 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
110}
111
112static inline unsigned long long transport_lba_64(unsigned char *cdb)
113{
114 unsigned int __v1, __v2;
115
116 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
117 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
118
119 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
120}
121
122/*
123 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
124 */
125static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
126{
127 unsigned int __v1, __v2;
128
129 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
130 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
131
132 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
133}
134
135static int sbc_write_same_supported(struct se_device *dev,
136 unsigned char *flags)
137{
138 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
139 pr_err("WRITE_SAME PBDATA and LBDATA"
140 " bits not supported for Block Discard"
141 " Emulation\n");
142 return -ENOSYS;
143 }
144
145 /*
146 * Currently for the emulated case we only accept
147 * tpws with the UNMAP=1 bit set.
148 */
149 if (!(flags[0] & 0x08)) {
150 pr_err("WRITE_SAME w/o UNMAP bit not"
151 " supported for Block Discard Emulation\n");
152 return -ENOSYS;
153 }
154
155 return 0;
156}
157
158static void xdreadwrite_callback(struct se_cmd *cmd)
159{
160 unsigned char *buf, *addr;
161 struct scatterlist *sg;
162 unsigned int offset;
163 int i;
164 int count;
165 /*
166 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
167 *
168 * 1) read the specified logical block(s);
169 * 2) transfer logical blocks from the data-out buffer;
170 * 3) XOR the logical blocks transferred from the data-out buffer with
171 * the logical blocks read, storing the resulting XOR data in a buffer;
172 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
173 * blocks transferred from the data-out buffer; and
174 * 5) transfer the resulting XOR data to the data-in buffer.
175 */
176 buf = kmalloc(cmd->data_length, GFP_KERNEL);
177 if (!buf) {
178 pr_err("Unable to allocate xor_callback buf\n");
179 return;
180 }
181 /*
182 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
183 * into the locally allocated *buf
184 */
185 sg_copy_to_buffer(cmd->t_data_sg,
186 cmd->t_data_nents,
187 buf,
188 cmd->data_length);
189
190 /*
191 * Now perform the XOR against the BIDI read memory located at
192 * cmd->t_mem_bidi_list
193 */
194
195 offset = 0;
196 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
197 addr = kmap_atomic(sg_page(sg));
198 if (!addr)
199 goto out;
200
201 for (i = 0; i < sg->length; i++)
202 *(addr + sg->offset + i) ^= *(buf + offset + i);
203
204 offset += sg->length;
205 kunmap_atomic(addr);
206 }
207
208out:
209 kfree(buf);
210}
211
212int sbc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
213{
214 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
215 struct se_device *dev = cmd->se_dev;
216 unsigned char *cdb = cmd->t_task_cdb;
217 u32 sectors = 0;
218 int ret;
219
220 switch (cdb[0]) {
221 case READ_6:
222 sectors = transport_get_sectors_6(cdb);
223 cmd->t_task_lba = transport_lba_21(cdb);
224 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
225 break;
226 case READ_10:
227 sectors = transport_get_sectors_10(cdb);
228 cmd->t_task_lba = transport_lba_32(cdb);
229 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
230 break;
231 case READ_12:
232 sectors = transport_get_sectors_12(cdb);
233 cmd->t_task_lba = transport_lba_32(cdb);
234 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
235 break;
236 case READ_16:
237 sectors = transport_get_sectors_16(cdb);
238 cmd->t_task_lba = transport_lba_64(cdb);
239 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
240 break;
241 case WRITE_6:
242 sectors = transport_get_sectors_6(cdb);
243 cmd->t_task_lba = transport_lba_21(cdb);
244 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
245 break;
246 case WRITE_10:
247 case WRITE_VERIFY:
248 sectors = transport_get_sectors_10(cdb);
249 cmd->t_task_lba = transport_lba_32(cdb);
250 if (cdb[1] & 0x8)
251 cmd->se_cmd_flags |= SCF_FUA;
252 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
253 break;
254 case WRITE_12:
255 sectors = transport_get_sectors_12(cdb);
256 cmd->t_task_lba = transport_lba_32(cdb);
257 if (cdb[1] & 0x8)
258 cmd->se_cmd_flags |= SCF_FUA;
259 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
260 break;
261 case WRITE_16:
262 sectors = transport_get_sectors_16(cdb);
263 cmd->t_task_lba = transport_lba_64(cdb);
264 if (cdb[1] & 0x8)
265 cmd->se_cmd_flags |= SCF_FUA;
266 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
267 break;
268 case XDWRITEREAD_10:
269 if ((cmd->data_direction != DMA_TO_DEVICE) ||
270 !(cmd->se_cmd_flags & SCF_BIDI))
271 goto out_invalid_cdb_field;
272 sectors = transport_get_sectors_10(cdb);
273
274 cmd->t_task_lba = transport_lba_32(cdb);
275 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
276
277 /*
278 * Setup BIDI XOR callback to be run after I/O completion.
279 */
280 cmd->transport_complete_callback = &xdreadwrite_callback;
281 if (cdb[1] & 0x8)
282 cmd->se_cmd_flags |= SCF_FUA;
283 break;
284 case VARIABLE_LENGTH_CMD:
285 {
286 u16 service_action = get_unaligned_be16(&cdb[8]);
287 switch (service_action) {
288 case XDWRITEREAD_32:
289 sectors = transport_get_sectors_32(cdb);
290
291 /*
292 * Use WRITE_32 and READ_32 opcodes for the emulated
293 * XDWRITE_READ_32 logic.
294 */
295 cmd->t_task_lba = transport_lba_64_ext(cdb);
296 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
297
298 /*
299 * Setup BIDI XOR callback to be run during after I/O
300 * completion.
301 */
302 cmd->transport_complete_callback = &xdreadwrite_callback;
303 if (cdb[1] & 0x8)
304 cmd->se_cmd_flags |= SCF_FUA;
305 break;
306 case WRITE_SAME_32:
307 sectors = transport_get_sectors_32(cdb);
308 if (!sectors) {
309 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
310 " supported\n");
311 goto out_invalid_cdb_field;
312 }
313
314 *size = sbc_get_size(cmd, 1);
315 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
316
317 if (sbc_write_same_supported(dev, &cdb[10]) < 0)
318 goto out_unsupported_cdb;
319 cmd->execute_cmd = target_emulate_write_same;
320 break;
321 default:
322 pr_err("VARIABLE_LENGTH_CMD service action"
323 " 0x%04x not supported\n", service_action);
324 goto out_unsupported_cdb;
325 }
326 break;
327 }
328 case READ_CAPACITY:
329 *size = READ_CAP_LEN;
330 cmd->execute_cmd = target_emulate_readcapacity;
331 break;
332 case SERVICE_ACTION_IN:
333 switch (cmd->t_task_cdb[1] & 0x1f) {
334 case SAI_READ_CAPACITY_16:
335 cmd->execute_cmd = target_emulate_readcapacity_16;
336 break;
337 default:
338 pr_err("Unsupported SA: 0x%02x\n",
339 cmd->t_task_cdb[1] & 0x1f);
340 goto out_invalid_cdb_field;
341 }
342 *size = (cdb[10] << 24) | (cdb[11] << 16) |
343 (cdb[12] << 8) | cdb[13];
344 break;
345 case SYNCHRONIZE_CACHE:
346 case SYNCHRONIZE_CACHE_16:
347 /*
348 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
349 */
350 if (cdb[0] == SYNCHRONIZE_CACHE) {
351 sectors = transport_get_sectors_10(cdb);
352 cmd->t_task_lba = transport_lba_32(cdb);
353 } else {
354 sectors = transport_get_sectors_16(cdb);
355 cmd->t_task_lba = transport_lba_64(cdb);
356 }
357
358 *size = sbc_get_size(cmd, sectors);
359
360 /*
361 * Check to ensure that LBA + Range does not exceed past end of
362 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
363 */
364 if (cmd->t_task_lba || sectors) {
365 if (sbc_check_valid_sectors(cmd) < 0)
366 goto out_invalid_cdb_field;
367 }
368 cmd->execute_cmd = target_emulate_synchronize_cache;
369 break;
370 case UNMAP:
371 *size = get_unaligned_be16(&cdb[7]);
372 cmd->execute_cmd = target_emulate_unmap;
373 break;
374 case WRITE_SAME_16:
375 sectors = transport_get_sectors_16(cdb);
376 if (!sectors) {
377 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
378 goto out_invalid_cdb_field;
379 }
380
381 *size = sbc_get_size(cmd, 1);
382 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
383
384 if (sbc_write_same_supported(dev, &cdb[1]) < 0)
385 goto out_unsupported_cdb;
386 cmd->execute_cmd = target_emulate_write_same;
387 break;
388 case WRITE_SAME:
389 sectors = transport_get_sectors_10(cdb);
390 if (!sectors) {
391 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
392 goto out_invalid_cdb_field;
393 }
394
395 *size = sbc_get_size(cmd, 1);
396 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
397
398 /*
399 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
400 * of byte 1 bit 3 UNMAP instead of original reserved field
401 */
402 if (sbc_write_same_supported(dev, &cdb[1]) < 0)
403 goto out_unsupported_cdb;
404 cmd->execute_cmd = target_emulate_write_same;
405 break;
406 case VERIFY:
407 *size = 0;
408 cmd->execute_cmd = target_emulate_noop;
409 break;
410 default:
411 ret = spc_parse_cdb(cmd, size, false);
412 if (ret)
413 return ret;
414 }
415
416 /* reject any command that we don't have a handler for */
417 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
418 goto out_unsupported_cdb;
419
420 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
421 if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
422 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
423 " big sectors %u exceeds fabric_max_sectors:"
424 " %u\n", cdb[0], sectors,
425 su_dev->se_dev_attrib.fabric_max_sectors);
426 goto out_invalid_cdb_field;
427 }
428 if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
429 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
430 " big sectors %u exceeds backend hw_max_sectors:"
431 " %u\n", cdb[0], sectors,
432 su_dev->se_dev_attrib.hw_max_sectors);
433 goto out_invalid_cdb_field;
434 }
435
436 *size = sbc_get_size(cmd, sectors);
437 }
438
439 return 0;
440
441out_unsupported_cdb:
442 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
443 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
444 return -EINVAL;
445out_invalid_cdb_field:
446 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
447 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
448 return -EINVAL;
449}
450EXPORT_SYMBOL(sbc_parse_cdb);
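
Most of the helpers above simply pull big-endian fields out of fixed CDB offsets. As a worked example (not part of the patch; a standalone user-space sketch with made-up CDB contents), a READ_16 CDB decodes as follows, using the same byte offsets as transport_lba_64() and transport_get_sectors_16():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* READ_16 (0x88): LBA 0x12345678, transfer length 0x80 blocks */
	uint8_t cdb[16] = {
		0x88, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x12, 0x34, 0x56, 0x78,	/* bytes 2-9: LBA */
		0x00, 0x00, 0x00, 0x80,				/* bytes 10-13: length */
		0x00, 0x00,
	};
	uint64_t lba = 0;
	uint32_t sectors;
	int i;

	for (i = 2; i <= 9; i++)		/* big-endian 64-bit LBA */
		lba = (lba << 8) | cdb[i];
	sectors = (cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13];

	/* prints: lba=305419896 sectors=128 */
	printf("lba=%llu sectors=%u\n", (unsigned long long)lba, sectors);
	return 0;
}
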
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index ec2108667d65..156291fbf6d8 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -152,6 +152,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size, bool passthrough)
 		cmd->sam_task_attr = MSG_HEAD_TAG;
 		break;
 	case TEST_UNIT_READY:
+		*size = 0;
 		if (!passthrough)
 			cmd->execute_cmd = target_emulate_noop;
 		break;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index a8a3d1544e65..0adabd37cbb1 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1343,8 +1343,6 @@ static inline void transport_generic_prepare_cdb(
 	}
 }
 
-static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
-
 static int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 {
 	struct se_device *dev = cmd->se_dev;
@@ -1471,6 +1469,7 @@ int target_setup_cmd_from_cdb(
 	u32 pr_reg_type = 0;
 	u8 alua_ascq = 0;
 	unsigned long flags;
+	unsigned int size;
 	int ret;
 
 	transport_generic_prepare_cdb(cdb);
@@ -1562,13 +1561,11 @@ int target_setup_cmd_from_cdb(
 	 */
 	}
 
-	/*
-	 * Setup the received CDB based on SCSI defined opcodes and
-	 * perform unit attention, persistent reservations and ALUA
-	 * checks for virtual device backends. The cmd->t_task_cdb
-	 * pointer is expected to be setup before we reach this point.
-	 */
-	ret = transport_generic_cmd_sequencer(cmd, cdb);
+	ret = cmd->se_dev->transport->parse_cdb(cmd, &size);
+	if (ret < 0)
+		return ret;
+
+	ret = target_cmd_size_check(cmd, size);
 	if (ret < 0)
 		return ret;
 
@@ -1694,10 +1691,7 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
 		target_put_sess_cmd(se_sess, se_cmd);
 		return;
 	}
-	/*
-	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
-	 * allocate the necessary tasks to complete the received CDB+data
-	 */
+
 	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
 	if (rc != 0) {
 		transport_generic_request_failure(se_cmd);
@@ -1966,39 +1960,6 @@ queue_full:
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
1969static inline u32 transport_lba_21(unsigned char *cdb)
1970{
1971 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1972}
1973
1974static inline u32 transport_lba_32(unsigned char *cdb)
1975{
1976 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1977}
1978
1979static inline unsigned long long transport_lba_64(unsigned char *cdb)
1980{
1981 unsigned int __v1, __v2;
1982
1983 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1984 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1985
1986 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1987}
1988
1989/*
1990 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
1991 */
1992static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
1993{
1994 unsigned int __v1, __v2;
1995
1996 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1997 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1998
1999 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2000}
2001
 /*
  * Called from Fabric Module context from transport_execute_tasks()
  *
@@ -2147,217 +2108,6 @@ check_depth:
 	return 0;
 }
 
2150static inline u32 transport_get_sectors_6(
2151 unsigned char *cdb,
2152 struct se_cmd *cmd,
2153 int *ret)
2154{
2155 struct se_device *dev = cmd->se_dev;
2156
2157 /*
2158 * Assume TYPE_DISK for non struct se_device objects.
2159 * Use 8-bit sector value.
2160 */
2161 if (!dev)
2162 goto type_disk;
2163
2164 /*
2165 * Use 24-bit allocation length for TYPE_TAPE.
2166 */
2167 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2168 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2169
2170 /*
2171 * Everything else assume TYPE_DISK Sector CDB location.
2172 * Use 8-bit sector value. SBC-3 says:
2173 *
2174 * A TRANSFER LENGTH field set to zero specifies that 256
2175 * logical blocks shall be written. Any other value
2176 * specifies the number of logical blocks that shall be
2177 * written.
2178 */
2179type_disk:
2180 return cdb[4] ? : 256;
2181}
2182
2183static inline u32 transport_get_sectors_10(
2184 unsigned char *cdb,
2185 struct se_cmd *cmd,
2186 int *ret)
2187{
2188 struct se_device *dev = cmd->se_dev;
2189
2190 /*
2191 * Assume TYPE_DISK for non struct se_device objects.
2192 * Use 16-bit sector value.
2193 */
2194 if (!dev)
2195 goto type_disk;
2196
2197 /*
2198 * XXX_10 is not defined in SSC, throw an exception
2199 */
2200 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2201 *ret = -EINVAL;
2202 return 0;
2203 }
2204
2205 /*
2206 * Everything else assume TYPE_DISK Sector CDB location.
2207 * Use 16-bit sector value.
2208 */
2209type_disk:
2210 return (u32)(cdb[7] << 8) + cdb[8];
2211}
2212
2213static inline u32 transport_get_sectors_12(
2214 unsigned char *cdb,
2215 struct se_cmd *cmd,
2216 int *ret)
2217{
2218 struct se_device *dev = cmd->se_dev;
2219
2220 /*
2221 * Assume TYPE_DISK for non struct se_device objects.
2222 * Use 32-bit sector value.
2223 */
2224 if (!dev)
2225 goto type_disk;
2226
2227 /*
2228 * XXX_12 is not defined in SSC, throw an exception
2229 */
2230 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2231 *ret = -EINVAL;
2232 return 0;
2233 }
2234
2235 /*
2236 * Everything else assume TYPE_DISK Sector CDB location.
2237 * Use 32-bit sector value.
2238 */
2239type_disk:
2240 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2241}
2242
2243static inline u32 transport_get_sectors_16(
2244 unsigned char *cdb,
2245 struct se_cmd *cmd,
2246 int *ret)
2247{
2248 struct se_device *dev = cmd->se_dev;
2249
2250 /*
2251 * Assume TYPE_DISK for non struct se_device objects.
2252 * Use 32-bit sector value.
2253 */
2254 if (!dev)
2255 goto type_disk;
2256
2257 /*
2258 * Use 24-bit allocation length for TYPE_TAPE.
2259 */
2260 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2261 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2262
2263type_disk:
2264 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2265 (cdb[12] << 8) + cdb[13];
2266}
2267
2268/*
2269 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2270 */
2271static inline u32 transport_get_sectors_32(
2272 unsigned char *cdb,
2273 struct se_cmd *cmd,
2274 int *ret)
2275{
2276 /*
2277 * Assume TYPE_DISK for non struct se_device objects.
2278 * Use 32-bit sector value.
2279 */
2280 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2281 (cdb[30] << 8) + cdb[31];
2282
2283}
2284
2285static inline u32 transport_get_size(
2286 u32 sectors,
2287 unsigned char *cdb,
2288 struct se_cmd *cmd)
2289{
2290 struct se_device *dev = cmd->se_dev;
2291
2292 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2293 if (cdb[1] & 1) { /* sectors */
2294 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2295 } else /* bytes */
2296 return sectors;
2297 }
2298
2299 pr_debug("Returning block_size: %u, sectors: %u == %u for"
2300 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
2301 sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2302 dev->transport->name);
2303
2304 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2305}
2306
2307static void transport_xor_callback(struct se_cmd *cmd)
2308{
2309 unsigned char *buf, *addr;
2310 struct scatterlist *sg;
2311 unsigned int offset;
2312 int i;
2313 int count;
2314 /*
2315 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2316 *
2317 * 1) read the specified logical block(s);
2318 * 2) transfer logical blocks from the data-out buffer;
2319 * 3) XOR the logical blocks transferred from the data-out buffer with
2320 * the logical blocks read, storing the resulting XOR data in a buffer;
2321 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2322 * blocks transferred from the data-out buffer; and
2323 * 5) transfer the resulting XOR data to the data-in buffer.
2324 */
2325 buf = kmalloc(cmd->data_length, GFP_KERNEL);
2326 if (!buf) {
2327 pr_err("Unable to allocate xor_callback buf\n");
2328 return;
2329 }
2330 /*
2331 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
2332 * into the locally allocated *buf
2333 */
2334 sg_copy_to_buffer(cmd->t_data_sg,
2335 cmd->t_data_nents,
2336 buf,
2337 cmd->data_length);
2338
2339 /*
2340 * Now perform the XOR against the BIDI read memory located at
2341 * cmd->t_mem_bidi_list
2342 */
2343
2344 offset = 0;
2345 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2346 addr = kmap_atomic(sg_page(sg));
2347 if (!addr)
2348 goto out;
2349
2350 for (i = 0; i < sg->length; i++)
2351 *(addr + sg->offset + i) ^= *(buf + offset + i);
2352
2353 offset += sg->length;
2354 kunmap_atomic(addr);
2355 }
2356
2357out:
2358 kfree(buf);
2359}
2360
 /*
  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
  */
@@ -2439,478 +2189,6 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
 	return 0;
 }
 
2442static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2443{
2444 /*
2445 * Determine if the received WRITE_SAME is used to for direct
2446 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2447 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2448 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code.
2449 */
2450 int passthrough = (dev->transport->transport_type ==
2451 TRANSPORT_PLUGIN_PHBA_PDEV);
2452
2453 if (!passthrough) {
2454 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2455 pr_err("WRITE_SAME PBDATA and LBDATA"
2456 " bits not supported for Block Discard"
2457 " Emulation\n");
2458 return -ENOSYS;
2459 }
2460 /*
2461 * Currently for the emulated case we only accept
2462 * tpws with the UNMAP=1 bit set.
2463 */
2464 if (!(flags[0] & 0x08)) {
2465 pr_err("WRITE_SAME w/o UNMAP bit not"
2466 " supported for Block Discard Emulation\n");
2467 return -ENOSYS;
2468 }
2469 }
2470
2471 return 0;
2472}
2473
2474static int transport_generic_cmd_sequencer(
2475 struct se_cmd *cmd,
2476 unsigned char *cdb)
2477{
2478 struct se_device *dev = cmd->se_dev;
2479 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2480 int sector_ret = 0, passthrough;
2481 u32 sectors = 0, size = 0;
2482 u16 service_action;
2483 int ret;
2484
2485 /*
2486 * If we operate in passthrough mode we skip most CDB emulation and
2487 * instead hand the commands down to the physical SCSI device.
2488 */
2489 passthrough =
2490 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2491
2492 switch (cdb[0]) {
2493 case READ_6:
2494 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2495 if (sector_ret)
2496 goto out_unsupported_cdb;
2497 size = transport_get_size(sectors, cdb, cmd);
2498 cmd->t_task_lba = transport_lba_21(cdb);
2499 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2500 break;
2501 case READ_10:
2502 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2503 if (sector_ret)
2504 goto out_unsupported_cdb;
2505 size = transport_get_size(sectors, cdb, cmd);
2506 cmd->t_task_lba = transport_lba_32(cdb);
2507 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2508 break;
2509 case READ_12:
2510 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2511 if (sector_ret)
2512 goto out_unsupported_cdb;
2513 size = transport_get_size(sectors, cdb, cmd);
2514 cmd->t_task_lba = transport_lba_32(cdb);
2515 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2516 break;
2517 case READ_16:
2518 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2519 if (sector_ret)
2520 goto out_unsupported_cdb;
2521 size = transport_get_size(sectors, cdb, cmd);
2522 cmd->t_task_lba = transport_lba_64(cdb);
2523 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2524 break;
2525 case WRITE_6:
2526 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2527 if (sector_ret)
2528 goto out_unsupported_cdb;
2529 size = transport_get_size(sectors, cdb, cmd);
2530 cmd->t_task_lba = transport_lba_21(cdb);
2531 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2532 break;
2533 case WRITE_10:
2534 case WRITE_VERIFY:
2535 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2536 if (sector_ret)
2537 goto out_unsupported_cdb;
2538 size = transport_get_size(sectors, cdb, cmd);
2539 cmd->t_task_lba = transport_lba_32(cdb);
2540 if (cdb[1] & 0x8)
2541 cmd->se_cmd_flags |= SCF_FUA;
2542 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2543 break;
2544 case WRITE_12:
2545 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2546 if (sector_ret)
2547 goto out_unsupported_cdb;
2548 size = transport_get_size(sectors, cdb, cmd);
2549 cmd->t_task_lba = transport_lba_32(cdb);
2550 if (cdb[1] & 0x8)
2551 cmd->se_cmd_flags |= SCF_FUA;
2552 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2553 break;
2554 case WRITE_16:
2555 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2556 if (sector_ret)
2557 goto out_unsupported_cdb;
2558 size = transport_get_size(sectors, cdb, cmd);
2559 cmd->t_task_lba = transport_lba_64(cdb);
2560 if (cdb[1] & 0x8)
2561 cmd->se_cmd_flags |= SCF_FUA;
2562 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2563 break;
2564 case XDWRITEREAD_10:
2565 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2566 !(cmd->se_cmd_flags & SCF_BIDI))
2567 goto out_invalid_cdb_field;
2568 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2569 if (sector_ret)
2570 goto out_unsupported_cdb;
2571 size = transport_get_size(sectors, cdb, cmd);
2572 cmd->t_task_lba = transport_lba_32(cdb);
2573 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2574
2575 /*
2576 * Do now allow BIDI commands for passthrough mode.
2577 */
2578 if (passthrough)
2579 goto out_unsupported_cdb;
2580
2581 /*
2582 * Setup BIDI XOR callback to be run after I/O completion.
2583 */
2584 cmd->transport_complete_callback = &transport_xor_callback;
2585 if (cdb[1] & 0x8)
2586 cmd->se_cmd_flags |= SCF_FUA;
2587 break;
2588 case VARIABLE_LENGTH_CMD:
2589 service_action = get_unaligned_be16(&cdb[8]);
2590 switch (service_action) {
2591 case XDWRITEREAD_32:
2592 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2593 if (sector_ret)
2594 goto out_unsupported_cdb;
2595 size = transport_get_size(sectors, cdb, cmd);
2596 /*
2597 * Use WRITE_32 and READ_32 opcodes for the emulated
2598 * XDWRITE_READ_32 logic.
2599 */
2600 cmd->t_task_lba = transport_lba_64_ext(cdb);
2601 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
2602
2603 /*
2604 * Do now allow BIDI commands for passthrough mode.
2605 */
2606 if (passthrough)
2607 goto out_unsupported_cdb;
2608
2609 /*
2610 * Setup BIDI XOR callback to be run during after I/O
2611 * completion.
2612 */
2613 cmd->transport_complete_callback = &transport_xor_callback;
2614 if (cdb[1] & 0x8)
2615 cmd->se_cmd_flags |= SCF_FUA;
2616 break;
2617 case WRITE_SAME_32:
2618 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2619 if (sector_ret)
2620 goto out_unsupported_cdb;
2621
2622 if (sectors)
2623 size = transport_get_size(1, cdb, cmd);
2624 else {
2625 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2626 " supported\n");
2627 goto out_invalid_cdb_field;
2628 }
2629
2630 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2631
2632 if (target_check_write_same_discard(&cdb[10], dev) < 0)
2633 goto out_unsupported_cdb;
2634 if (!passthrough)
2635 cmd->execute_cmd = target_emulate_write_same;
2636 break;
2637 default:
2638 pr_err("VARIABLE_LENGTH_CMD service action"
2639 " 0x%04x not supported\n", service_action);
2640 goto out_unsupported_cdb;
2641 }
2642 break;
2643 case MAINTENANCE_IN:
2644 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2645 /* MAINTENANCE_IN from SCC-2 */
2646 /*
2647 * Check for emulated MI_REPORT_TARGET_PGS.
2648 */
2649 if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
2650 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2651 cmd->execute_cmd =
2652 target_emulate_report_target_port_groups;
2653 }
2654 size = (cdb[6] << 24) | (cdb[7] << 16) |
2655 (cdb[8] << 8) | cdb[9];
2656 } else {
2657 /* GPCMD_SEND_KEY from multi media commands */
2658 size = (cdb[8] << 8) + cdb[9];
2659 }
2660 break;
2661 case GPCMD_READ_BUFFER_CAPACITY:
2662 case GPCMD_SEND_OPC:
2663 size = (cdb[7] << 8) + cdb[8];
2664 break;
2665 case READ_BLOCK_LIMITS:
2666 size = READ_BLOCK_LEN;
2667 break;
2668 case GPCMD_GET_CONFIGURATION:
2669 case GPCMD_READ_FORMAT_CAPACITIES:
2670 case GPCMD_READ_DISC_INFO:
2671 case GPCMD_READ_TRACK_RZONE_INFO:
2672 size = (cdb[7] << 8) + cdb[8];
2673 break;
2674 case GPCMD_MECHANISM_STATUS:
2675 case GPCMD_READ_DVD_STRUCTURE:
2676 size = (cdb[8] << 8) + cdb[9];
2677 break;
2678 case READ_POSITION:
2679 size = READ_POSITION_LEN;
2680 break;
2681 case MAINTENANCE_OUT:
2682 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2683 /* MAINTENANCE_OUT from SCC-2
2684 *
2685 * Check for emulated MO_SET_TARGET_PGS.
2686 */
2687 if (cdb[1] == MO_SET_TARGET_PGS &&
2688 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2689 cmd->execute_cmd =
2690 target_emulate_set_target_port_groups;
2691 }
2692
2693 size = (cdb[6] << 24) | (cdb[7] << 16) |
2694 (cdb[8] << 8) | cdb[9];
2695 } else {
2696 /* GPCMD_REPORT_KEY from multi media commands */
2697 size = (cdb[8] << 8) + cdb[9];
2698 }
2699 break;
2700 case READ_BUFFER:
2701 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2702 break;
2703 case READ_CAPACITY:
2704 size = READ_CAP_LEN;
2705 if (!passthrough)
2706 cmd->execute_cmd = target_emulate_readcapacity;
2707 break;
2708 case READ_MEDIA_SERIAL_NUMBER:
2709 case SERVICE_ACTION_IN:
2710 switch (cmd->t_task_cdb[1] & 0x1f) {
2711 case SAI_READ_CAPACITY_16:
2712 if (!passthrough)
2713 cmd->execute_cmd =
2714 target_emulate_readcapacity_16;
2715 break;
2716 default:
2717 if (passthrough)
2718 break;
2719
2720 pr_err("Unsupported SA: 0x%02x\n",
2721 cmd->t_task_cdb[1] & 0x1f);
2722 goto out_invalid_cdb_field;
2723 }
2724 /*FALLTHROUGH*/
2725 case ACCESS_CONTROL_IN:
2726 case ACCESS_CONTROL_OUT:
2727 size = (cdb[10] << 24) | (cdb[11] << 16) |
2728 (cdb[12] << 8) | cdb[13];
2729 break;
2730/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2731#if 0
2732 case GPCMD_READ_CD:
2733 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2734 size = (2336 * sectors);
2735 break;
2736#endif
2737 case READ_TOC:
2738 size = cdb[8];
2739 break;
2740 case READ_ELEMENT_STATUS:
2741 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
2742 break;
2743 case SYNCHRONIZE_CACHE:
2744 case SYNCHRONIZE_CACHE_16:
2745 /*
2746 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
2747 */
2748 if (cdb[0] == SYNCHRONIZE_CACHE) {
2749 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2750 cmd->t_task_lba = transport_lba_32(cdb);
2751 } else {
2752 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2753 cmd->t_task_lba = transport_lba_64(cdb);
2754 }
2755 if (sector_ret)
2756 goto out_unsupported_cdb;
2757
2758 size = transport_get_size(sectors, cdb, cmd);
2759
2760 if (passthrough)
2761 break;
2762
2763 /*
2764 * Check to ensure that LBA + Range does not exceed past end of
2765 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
2766 */
2767 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
2768 if (transport_cmd_get_valid_sectors(cmd) < 0)
2769 goto out_invalid_cdb_field;
2770 }
2771 cmd->execute_cmd = target_emulate_synchronize_cache;
2772 break;
2773 case UNMAP:
2774 size = get_unaligned_be16(&cdb[7]);
2775 if (!passthrough)
2776 cmd->execute_cmd = target_emulate_unmap;
2777 break;
2778 case WRITE_SAME_16:
2779 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2780 if (sector_ret)
2781 goto out_unsupported_cdb;
2782
2783 if (sectors)
2784 size = transport_get_size(1, cdb, cmd);
2785 else {
2786 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2787 goto out_invalid_cdb_field;
2788 }
2789
2790 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
2791
2792 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2793 goto out_unsupported_cdb;
2794 if (!passthrough)
2795 cmd->execute_cmd = target_emulate_write_same;
2796 break;
2797 case WRITE_SAME:
2798 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2799 if (sector_ret)
2800 goto out_unsupported_cdb;
2801
2802 if (sectors)
2803 size = transport_get_size(1, cdb, cmd);
2804 else {
2805 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2806 goto out_invalid_cdb_field;
2807 }
2808
2809 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
2810 /*
2811 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
2812 * of byte 1 bit 3 UNMAP instead of original reserved field
2813 */
2814 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2815 goto out_unsupported_cdb;
2816 if (!passthrough)
2817 cmd->execute_cmd = target_emulate_write_same;
2818 break;
2819 case ALLOW_MEDIUM_REMOVAL:
2820 case ERASE:
2821 case REZERO_UNIT:
2822 case SEEK_10:
2823 case SPACE:
2824 case START_STOP:
2825 case VERIFY:
2826 case WRITE_FILEMARKS:
2827 if (!passthrough)
2828 cmd->execute_cmd = target_emulate_noop;
2829 break;
2830 case GPCMD_CLOSE_TRACK:
2831 case INITIALIZE_ELEMENT_STATUS:
2832 case GPCMD_LOAD_UNLOAD:
2833 case GPCMD_SET_SPEED:
2834 case MOVE_MEDIUM:
2835 break;
2836 case GET_EVENT_STATUS_NOTIFICATION:
2837 size = (cdb[7] << 8) | cdb[8];
2838 break;
2839 case ATA_16:
2840 /* Only support ATA passthrough to pSCSI backends.. */
2841 if (!passthrough)
2842 goto out_unsupported_cdb;
2843
2844 /* T_LENGTH */
2845 switch (cdb[2] & 0x3) {
2846 case 0x0:
2847 sectors = 0;
2848 break;
2849 case 0x1:
2850 sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
2851 break;
2852 case 0x2:
2853 sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
2854 break;
2855 case 0x3:
2856 pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
2857 goto out_invalid_cdb_field;
2858 }
2859
2860 /* BYTE_BLOCK */
2861 if (cdb[2] & 0x4) {
2862 /* BLOCK T_TYPE: 512 or sector */
2863 size = sectors * ((cdb[2] & 0x10) ?
2864 dev->se_sub_dev->se_dev_attrib.block_size : 512);
2865 } else {
2866 /* BYTE */
2867 size = sectors;
2868 }
2869 break;
2870 default:
2871 ret = spc_parse_cdb(cmd, &size, passthrough);
2872 if (ret)
2873 return ret;
2874 }
2875
2876 ret = target_cmd_size_check(cmd, size);
2877 if (ret)
2878 return ret;
2879
2880 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
2881 if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
2882 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
2883 " big sectors %u exceeds fabric_max_sectors:"
2884 " %u\n", cdb[0], sectors,
2885 su_dev->se_dev_attrib.fabric_max_sectors);
2886 goto out_invalid_cdb_field;
2887 }
2888 if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
2889 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
2890 " big sectors %u exceeds backend hw_max_sectors:"
2891 " %u\n", cdb[0], sectors,
2892 su_dev->se_dev_attrib.hw_max_sectors);
2893 goto out_invalid_cdb_field;
2894 }
2895 }
2896
2897 /* reject any command that we don't have a handler for */
2898 if (!(passthrough || cmd->execute_cmd ||
2899 (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)))
2900 goto out_unsupported_cdb;
2901
2902 return 0;
2903
2904out_unsupported_cdb:
2905 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2906 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2907 return -EINVAL;
2908out_invalid_cdb_field:
2909 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2910 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2911 return -EINVAL;
2912}
2913
 /*
  * Called from I/O completion to determine which dormant/delayed
  * and ordered cmds need to have their tasks added to the execution queue.
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 2d7db85e93ae..f4f1eef6bf55 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -24,6 +24,8 @@ struct se_subsystem_api {
 			struct se_subsystem_dev *, void *);
 	void (*free_device)(void *);
 	int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *);
+
+	int (*parse_cdb)(struct se_cmd *cmd, unsigned int *size);
 	int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32,
 		enum dma_data_direction);
 	int (*do_discard)(struct se_device *, sector_t, u32);
@@ -49,6 +51,9 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *,
 
 void target_complete_cmd(struct se_cmd *, u8);
 
+int sbc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
+int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size, bool passthrough);
+
 void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
 int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
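
Putting the new hook together: a backend's parse_cdb() decodes the CDB, reports the expected data length through *size, and either sets up the command (LBA, SCF_SCSI_DATA_CDB, execute_cmd) or falls back to spc_parse_cdb(). A hypothetical minimal implementation, sketched here only to illustrate the contract (the foo_ name is made up and this is not code from the patch), might look like:

static int foo_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case READ_10:
		/* bytes 2-5: LBA, bytes 7-8: transfer length in blocks */
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		*size = get_unaligned_be16(&cdb[7]) *
			cmd->se_dev->se_sub_dev->se_dev_attrib.block_size;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		return 0;
	default:
		/* defer everything else to the generic SPC parser */
		return spc_parse_cdb(cmd, size, false);
	}
}

Wiring it up then mirrors the fileio/iblock/rd changes above: the backend's struct se_subsystem_api gains a .parse_cdb = foo_parse_cdb initializer, or simply .parse_cdb = sbc_parse_cdb when the stock SBC emulation is sufficient.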