author		Roland Dreier <roland@purestorage.com>	2012-02-13 19:18:17 -0500
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2012-02-25 17:37:49 -0500
commit		015487b89f27d91d95a056cdc3c85e6c729bff12 (patch)
tree		0cecb2acc903154e25abb23e8f345f301fcd1ef5 /drivers/target
parent		effc6cc8828257c32c37635e737f14fd6e19ecd7 (diff)
target: Untangle front-end and back-end meanings of max_sectors attribute
se_dev_attrib.max_sectors currently has two independent meanings:

- It is reported in the block limits VPD page as the maximum transfer
  length, i.e. the largest IO that the front-end (fabric) can handle.
  Note that the target core doesn't enforce this maximum transfer length.

- It is used to hold the size of the largest IO that the back-end can
  handle, so we know when to split SCSI commands into multiple tasks.

Fix this by adding a new se_dev_attrib.fabric_max_sectors to hold the
maximum transfer length, and checking incoming IOs against that limit.

Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
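As a rough standalone illustration of the split (the struct and helper names below are invented for this sketch, it is not the kernel code): fabric_max_sectors gates whether a command is accepted at all, while max_sectors only controls how an accepted command is carved into back-end tasks.

/*
 * Standalone sketch, not kernel code: toy_dev_attrib and toy_queue_io()
 * are invented names that mirror the two attributes' roles.
 */
#include <stdio.h>

struct toy_dev_attrib {
	unsigned int fabric_max_sectors;	/* front-end: largest IO a command may ask for */
	unsigned int max_sectors;		/* back-end: largest IO per task */
};

static int toy_queue_io(const struct toy_dev_attrib *attrib, unsigned int sectors)
{
	if (sectors > attrib->fabric_max_sectors) {
		fprintf(stderr, "reject: %u sectors exceeds fabric_max_sectors %u\n",
			sectors, attrib->fabric_max_sectors);
		return -1;
	}
	/* Accepted: split into back-end tasks of at most max_sectors each. */
	unsigned int tasks = (sectors + attrib->max_sectors - 1) / attrib->max_sectors;
	printf("accept: %u sectors -> %u task(s) of <= %u sectors\n",
	       sectors, tasks, attrib->max_sectors);
	return 0;
}

int main(void)
{
	struct toy_dev_attrib attrib = { .fabric_max_sectors = 8192, .max_sectors = 1024 };

	toy_queue_io(&attrib, 4096);	/* accepted, split into 4 back-end tasks */
	toy_queue_io(&attrib, 16384);	/* rejected at the front end */
	return 0;
}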
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/target_core_cdb.c	2
-rw-r--r--	drivers/target/target_core_configfs.c	4
-rw-r--r--	drivers/target/target_core_device.c	65
-rw-r--r--	drivers/target/target_core_internal.h	1
-rw-r--r--	drivers/target/target_core_transport.c	8
5 files changed, 73 insertions(+), 7 deletions(-)
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 4f65b258cc25..41ca2d43377b 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -456,7 +456,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	/*
 	 * Set MAXIMUM TRANSFER LENGTH
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]);
 
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
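For reference, the value written above lands in the Block Limits VPD page (B0h), where MAXIMUM TRANSFER LENGTH occupies bytes 8-11 as a 32-bit big-endian count of logical blocks. The toy helper below just mimics that packing outside the kernel; put_be32() is a stand-in for put_unaligned_be32().

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's put_unaligned_be32(): big-endian store. */
static void put_be32(uint32_t val, uint8_t *p)
{
	p[0] = val >> 24;
	p[1] = val >> 16;
	p[2] = val >> 8;
	p[3] = val;
}

int main(void)
{
	uint8_t vpd_b0[64] = { 0 };
	uint32_t fabric_max_sectors = 8192;

	/* MAXIMUM TRANSFER LENGTH: bytes 8-11 of the B0h page, in blocks. */
	put_be32(fabric_max_sectors, &vpd_b0[8]);
	printf("%02x %02x %02x %02x\n", vpd_b0[8], vpd_b0[9], vpd_b0[10], vpd_b0[11]);
	return 0;
}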
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0700d3b3d1c0..ac0ee5021c29 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -702,6 +702,9 @@ SE_DEV_ATTR_RO(hw_max_sectors);
 DEF_DEV_ATTRIB(max_sectors);
 SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
 
+DEF_DEV_ATTRIB(fabric_max_sectors);
+SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
+
 DEF_DEV_ATTRIB(optimal_sectors);
 SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
 
@@ -741,6 +744,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
 	&target_core_dev_attrib_block_size.attr,
 	&target_core_dev_attrib_hw_max_sectors.attr,
 	&target_core_dev_attrib_max_sectors.attr,
+	&target_core_dev_attrib_fabric_max_sectors.attr,
 	&target_core_dev_attrib_optimal_sectors.attr,
 	&target_core_dev_attrib_hw_queue_depth.attr,
 	&target_core_dev_attrib_queue_depth.attr,
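With the attribute wired into target_core_dev_attrib_attrs[], configfs exposes it as a plain read/write file under the device's attrib/ directory. The sketch below shows how a userspace tool might read and lower it; the backstore path (iblock_0/my_dev) is an assumed example, not something defined by this patch.

#include <stdio.h>

int main(void)
{
	/* Assumed example path; the hba/device names depend on local setup. */
	const char *path =
		"/sys/kernel/config/target/core/iblock_0/my_dev/attrib/fabric_max_sectors";
	FILE *f = fopen(path, "r+");
	unsigned int val;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &val) == 1)
		printf("current fabric_max_sectors: %u\n", val);

	rewind(f);
	fprintf(f, "%u\n", 8192);	/* lower the reported/enforced limit */
	fclose(f);
	return 0;
}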
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 0b25b50900e9..27da4e4e07c6 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -889,10 +889,15 @@ void se_dev_set_default_attribs(
 						limits->logical_block_size);
 	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
 	/*
-	 * Set optimal_sectors from max_sectors, which can be lowered via
-	 * configfs.
+	 * Set fabric_max_sectors, which is reported in block limits
+	 * VPD page (B0h).
 	 */
-	dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
+	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+	/*
+	 * Set optimal_sectors from fabric_max_sectors, which can be
+	 * lowered via configfs.
+	 */
+	dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
 	/*
 	 * queue_depth is based on subsystem plugin dependent requirements.
 	 */
@@ -1224,6 +1229,54 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
 	return 0;
 }
 
+int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_err("dev[%p]: Unable to change SE Device"
+			" fabric_max_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	if (!fabric_max_sectors) {
+		pr_err("dev[%p]: Illegal ZERO value for"
+			" fabric_max_sectors\n", dev);
+		return -EINVAL;
+	}
+	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
+			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
+			DA_STATUS_MAX_SECTORS_MIN);
+		return -EINVAL;
+	}
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors:"
+				" %u\n", dev, fabric_max_sectors,
+				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+			return -EINVAL;
+		}
+	} else {
+		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+				" greater than DA_STATUS_MAX_SECTORS_MAX:"
+				" %u\n", dev, fabric_max_sectors,
+				DA_STATUS_MAX_SECTORS_MAX);
+			return -EINVAL;
+		}
+	}
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
+			dev->se_sub_dev->se_dev_attrib.block_size);
+
+	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
+		dev, fabric_max_sectors);
+	return 0;
+}
+
 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 {
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
@@ -1237,10 +1290,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1237 " changed for TCM/pSCSI\n", dev); 1290 " changed for TCM/pSCSI\n", dev);
1238 return -EINVAL; 1291 return -EINVAL;
1239 } 1292 }
1240 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { 1293 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
1241 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1294 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1242 " greater than max_sectors: %u\n", dev, 1295 " greater than fabric_max_sectors: %u\n", dev,
1243 optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); 1296 optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
1244 return -EINVAL; 1297 return -EINVAL;
1245 } 1298 }
1246 1299
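The comment in se_dev_set_fabric_max_sectors() above defers the rounding to se_dev_align_max_sectors(). As an assumption about what that alignment amounts to (round the sector count down so the resulting byte length is a whole number of pages), here is a standalone sketch, not a copy of the kernel helper:

#include <stdio.h>

#define TOY_PAGE_SIZE 4096u	/* assumed page size for the example */

/* Assumed behaviour of se_dev_align_max_sectors(): round the sector
 * count down so sectors * block_size is a multiple of the page size. */
static unsigned int align_max_sectors(unsigned int max_sectors, unsigned int block_size)
{
	unsigned long bytes = (unsigned long)max_sectors * block_size;

	bytes -= bytes % TOY_PAGE_SIZE;
	return (unsigned int)(bytes / block_size);
}

int main(void)
{
	/* 1023 sectors of 512 bytes (523776 B) rounds down to 1016 sectors (127 pages). */
	printf("%u\n", align_max_sectors(1023, 512));
	return 0;
}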
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index b026dedb8184..21c05638f158 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -53,6 +53,7 @@ int se_dev_set_is_nonrot(struct se_device *, int);
 int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
 int se_dev_set_queue_depth(struct se_device *, u32);
 int se_dev_set_max_sectors(struct se_device *, u32);
+int se_dev_set_fabric_max_sectors(struct se_device *, u32);
 int se_dev_set_optimal_sectors(struct se_device *, u32);
 int se_dev_set_block_size(struct se_device *, u32);
 struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 19804b6fdbaa..b79c6a2824ee 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -37,6 +37,7 @@
 #include <linux/in.h>
 #include <linux/cdrom.h>
 #include <linux/module.h>
+#include <linux/ratelimit.h>
 #include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
@@ -3107,6 +3108,13 @@ static int transport_generic_cmd_sequencer(
 		cmd->data_length = size;
 	}
 
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
+	    sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+		printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
+				   cdb[0], sectors);
+		goto out_invalid_cdb_field;
+	}
+
 	/* reject any command that we don't have a handler for */
 	if (!(passthrough || cmd->execute_task ||
 	      (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
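The sequencer hunk above is where the new limit is actually enforced: any data-phase command whose sector count exceeds fabric_max_sectors is rejected with a ratelimited error. The standalone sketch below mirrors the idea for a READ(10)/WRITE(10) CDB, whose transfer length sits in bytes 7-8 as a big-endian block count; in the kernel, sectors is derived earlier in transport_generic_cmd_sequencer(), so this is an illustration only.

#include <stdint.h>
#include <stdio.h>

/* READ(10)/WRITE(10) carry the transfer length in CDB bytes 7-8 (big-endian, in blocks). */
static unsigned int cdb10_sectors(const uint8_t *cdb)
{
	return ((unsigned int)cdb[7] << 8) | cdb[8];
}

static int check_transfer_length(const uint8_t *cdb, unsigned int fabric_max_sectors)
{
	unsigned int sectors = cdb10_sectors(cdb);

	if (sectors > fabric_max_sectors) {
		fprintf(stderr, "SCSI op %02xh rejected: %u sectors > limit %u\n",
			cdb[0], sectors, fabric_max_sectors);
		return -1;	/* the kernel path goes to out_invalid_cdb_field */
	}
	return 0;
}

int main(void)
{
	/* READ(10) asking for 0x4000 (16384) blocks starting at LBA 0. */
	uint8_t cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0x40, 0x00, 0 };

	return check_transfer_length(cdb, 8192) ? 1 : 0;
}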