aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/target/target_core_cdb.c2
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_device.c65
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_transport.c8
-rw-r--r--include/target/target_core_base.h3
6 files changed, 76 insertions, 7 deletions
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 4f65b258cc25..41ca2d43377b 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -456,7 +456,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
456 /* 456 /*
457 * Set MAXIMUM TRANSFER LENGTH 457 * Set MAXIMUM TRANSFER LENGTH
458 */ 458 */
459 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]); 459 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]);
460 460
461 /* 461 /*
462 * Set OPTIMAL TRANSFER LENGTH 462 * Set OPTIMAL TRANSFER LENGTH
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0700d3b3d1c0..ac0ee5021c29 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -702,6 +702,9 @@ SE_DEV_ATTR_RO(hw_max_sectors);
702DEF_DEV_ATTRIB(max_sectors); 702DEF_DEV_ATTRIB(max_sectors);
703SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR); 703SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
704 704
705DEF_DEV_ATTRIB(fabric_max_sectors);
706SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
707
705DEF_DEV_ATTRIB(optimal_sectors); 708DEF_DEV_ATTRIB(optimal_sectors);
706SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR); 709SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
707 710
@@ -741,6 +744,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
741 &target_core_dev_attrib_block_size.attr, 744 &target_core_dev_attrib_block_size.attr,
742 &target_core_dev_attrib_hw_max_sectors.attr, 745 &target_core_dev_attrib_hw_max_sectors.attr,
743 &target_core_dev_attrib_max_sectors.attr, 746 &target_core_dev_attrib_max_sectors.attr,
747 &target_core_dev_attrib_fabric_max_sectors.attr,
744 &target_core_dev_attrib_optimal_sectors.attr, 748 &target_core_dev_attrib_optimal_sectors.attr,
745 &target_core_dev_attrib_hw_queue_depth.attr, 749 &target_core_dev_attrib_hw_queue_depth.attr,
746 &target_core_dev_attrib_queue_depth.attr, 750 &target_core_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 0b25b50900e9..27da4e4e07c6 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -889,10 +889,15 @@ void se_dev_set_default_attribs(
889 limits->logical_block_size); 889 limits->logical_block_size);
890 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; 890 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
891 /* 891 /*
892 * Set optimal_sectors from max_sectors, which can be lowered via 892 * Set fabric_max_sectors, which is reported in block limits
893 * configfs. 893 * VPD page (B0h).
894 */ 894 */
895 dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors; 895 dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
896 /*
897 * Set optimal_sectors from fabric_max_sectors, which can be
898 * lowered via configfs.
899 */
900 dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
896 /* 901 /*
897 * queue_depth is based on subsystem plugin dependent requirements. 902 * queue_depth is based on subsystem plugin dependent requirements.
898 */ 903 */
@@ -1224,6 +1229,54 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1224 return 0; 1229 return 0;
1225} 1230}
1226 1231
1232int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1233{
1234 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1235 pr_err("dev[%p]: Unable to change SE Device"
1236 " fabric_max_sectors while dev_export_obj: %d count exists\n",
1237 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1238 return -EINVAL;
1239 }
1240 if (!fabric_max_sectors) {
1241 pr_err("dev[%p]: Illegal ZERO value for"
1242 " fabric_max_sectors\n", dev);
1243 return -EINVAL;
1244 }
1245 if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1246 pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
1247 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
1248 DA_STATUS_MAX_SECTORS_MIN);
1249 return -EINVAL;
1250 }
1251 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1252 if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
1253 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1254 " greater than TCM/SE_Device max_sectors:"
1255 " %u\n", dev, fabric_max_sectors,
1256 dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1257 return -EINVAL;
1258 }
1259 } else {
1260 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1261 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1262 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1263 " %u\n", dev, fabric_max_sectors,
1264 DA_STATUS_MAX_SECTORS_MAX);
1265 return -EINVAL;
1266 }
1267 }
1268 /*
1269 * Align fabric_max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1270 */
1271 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
1272 dev->se_sub_dev->se_dev_attrib.block_size);
1273
1274 dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
1275 pr_debug("dev[%p]: SE Device fabric_max_sectors changed to %u\n",
1276 dev, fabric_max_sectors);
1277 return 0;
1278}
1279
1227int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1280int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1228{ 1281{
1229 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1282 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
@@ -1237,10 +1290,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1237 " changed for TCM/pSCSI\n", dev); 1290 " changed for TCM/pSCSI\n", dev);
1238 return -EINVAL; 1291 return -EINVAL;
1239 } 1292 }
1240 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { 1293 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
1241 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1294 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1242 " greater than max_sectors: %u\n", dev, 1295 " greater than fabric_max_sectors: %u\n", dev,
1243 optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); 1296 optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
1244 return -EINVAL; 1297 return -EINVAL;
1245 } 1298 }
1246 1299
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index b026dedb8184..21c05638f158 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -53,6 +53,7 @@ int se_dev_set_is_nonrot(struct se_device *, int);
53int se_dev_set_emulate_rest_reord(struct se_device *dev, int); 53int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
54int se_dev_set_queue_depth(struct se_device *, u32); 54int se_dev_set_queue_depth(struct se_device *, u32);
55int se_dev_set_max_sectors(struct se_device *, u32); 55int se_dev_set_max_sectors(struct se_device *, u32);
56int se_dev_set_fabric_max_sectors(struct se_device *, u32);
56int se_dev_set_optimal_sectors(struct se_device *, u32); 57int se_dev_set_optimal_sectors(struct se_device *, u32);
57int se_dev_set_block_size(struct se_device *, u32); 58int se_dev_set_block_size(struct se_device *, u32);
58struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, 59struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 19804b6fdbaa..b79c6a2824ee 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -37,6 +37,7 @@
37#include <linux/in.h> 37#include <linux/in.h>
38#include <linux/cdrom.h> 38#include <linux/cdrom.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/ratelimit.h>
40#include <asm/unaligned.h> 41#include <asm/unaligned.h>
41#include <net/sock.h> 42#include <net/sock.h>
42#include <net/tcp.h> 43#include <net/tcp.h>
@@ -3107,6 +3108,13 @@ static int transport_generic_cmd_sequencer(
3107 cmd->data_length = size; 3108 cmd->data_length = size;
3108 } 3109 }
3109 3110
3111 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
3112 sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
3113 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
3114 cdb[0], sectors);
3115 goto out_invalid_cdb_field;
3116 }
3117
3110 /* reject any command that we don't have a handler for */ 3118 /* reject any command that we don't have a handler for */
3111 if (!(passthrough || cmd->execute_task || 3119 if (!(passthrough || cmd->execute_task ||
3112 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) 3120 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 99d7373ef834..1641dea0c282 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -86,6 +86,8 @@
86#define DA_UNMAP_GRANULARITY_DEFAULT 0 86#define DA_UNMAP_GRANULARITY_DEFAULT 0
87/* Default unmap_granularity_alignment */ 87/* Default unmap_granularity_alignment */
88#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 88#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
89/* Default max transfer length */
90#define DA_FABRIC_MAX_SECTORS 8192
89/* Emulation for Direct Page Out */ 91/* Emulation for Direct Page Out */
90#define DA_EMULATE_DPO 0 92#define DA_EMULATE_DPO 0
91/* Emulation for Forced Unit Access WRITEs */ 93/* Emulation for Forced Unit Access WRITEs */
@@ -726,6 +728,7 @@ struct se_dev_attrib {
726 u32 block_size; 728 u32 block_size;
727 u32 hw_max_sectors; 729 u32 hw_max_sectors;
728 u32 max_sectors; 730 u32 max_sectors;
731 u32 fabric_max_sectors;
729 u32 optimal_sectors; 732 u32 optimal_sectors;
730 u32 hw_queue_depth; 733 u32 hw_queue_depth;
731 u32 queue_depth; 734 u32 queue_depth;