author     Andy Grover <agrover@redhat.com>          2013-11-11 11:59:17 -0500
committer  Nicholas Bellinger <nab@linux-iscsi.org>  2013-11-12 16:44:54 -0500
commit     7f7caf6aa74a4f4ad21ebe08bf23b594fce45ca7
tree       058c55e859712486b9f6a8a20f3ea46add3a30eb
parent     f01b9f73392b48c6cda7c2c66594c73137c776da
target: Pass through I/O topology for block backstores
In addition to the block size (already implemented), passing through the
alignment offset, logical-to-physical block exponent, I/O granularity and
optimal I/O length will allow initiators to properly handle layout on
LUNs with 4K block sizes.

Tested with various weird values via the scsi_debug module.

One thing to look at with this patch is the new block limits values:
instead of granularity 1 / optimal 8192, LIO will now return whatever
the block device reports, which may affect performance.
Signed-off-by: Andy Grover <agrover@redhat.com>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--  drivers/target/target_core_iblock.c    | 43
-rw-r--r--  drivers/target/target_core_sbc.c       | 12
-rw-r--r--  drivers/target/target_core_spc.c       | 11
-rw-r--r--  include/target/target_core_backend.h   |  5
4 files changed, 68 insertions, 3 deletions
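To make the effect concrete before reading the diffs, here is a standalone sketch (not part of the patch; the device parameters are hypothetical) of what a 512e disk (512-byte logical blocks on 4096-byte physical blocks) would now report through READ CAPACITY (16) and the Block Limits VPD page, using the same arithmetic the patch adds below:

#include <stdio.h>

/* Minimal reimplementation of ilog2() for this sketch */
static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        /* Hypothetical 512e disk, as the block layer would describe it */
        unsigned int logical_block_size  = 512;     /* bdev_logical_block_size() */
        unsigned int physical_block_size = 4096;    /* bdev_physical_block_size() */
        unsigned int alignment_offset    = 0;       /* bdev_alignment_offset(), bytes */
        unsigned int io_min              = 4096;    /* bdev_io_min() */
        unsigned int io_opt              = 524288;  /* bdev_io_opt() */

        /* READ CAPACITY (16) fields */
        unsigned int lbppbe = ilog2_u32(physical_block_size / logical_block_size);
        unsigned int lalba  = alignment_offset / logical_block_size;
        /* Block Limits VPD fields, in logical blocks */
        unsigned int granularity = io_min / logical_block_size;
        unsigned int optimal     = io_opt / logical_block_size;

        printf("lbppbe=%u lowest_aligned_lba=%u granularity=%u optimal=%u\n",
               lbppbe, lalba, granularity, optimal);
        /* Prints: lbppbe=3 lowest_aligned_lba=0 granularity=8 optimal=1024 */
        return 0;
}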
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b9a3394fe479..c87959f12760 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -710,6 +710,45 @@ static sector_t iblock_get_blocks(struct se_device *dev)
         return iblock_emulate_read_cap_with_block_size(dev, bd, q);
 }
 
+static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
+{
+        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+        struct block_device *bd = ib_dev->ibd_bd;
+        int ret;
+
+        ret = bdev_alignment_offset(bd);
+        if (ret == -1)
+                return 0;
+
+        /* convert offset-bytes to offset-lbas */
+        return ret / bdev_logical_block_size(bd);
+}
+
+static unsigned int iblock_get_lbppbe(struct se_device *dev)
+{
+        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+        struct block_device *bd = ib_dev->ibd_bd;
+        int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+
+        return ilog2(logs_per_phys);
+}
+
+static unsigned int iblock_get_io_min(struct se_device *dev)
+{
+        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+        struct block_device *bd = ib_dev->ibd_bd;
+
+        return bdev_io_min(bd);
+}
+
+static unsigned int iblock_get_io_opt(struct se_device *dev)
+{
+        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+        struct block_device *bd = ib_dev->ibd_bd;
+
+        return bdev_io_opt(bd);
+}
+
 static struct sbc_ops iblock_sbc_ops = {
         .execute_rw = iblock_execute_rw,
         .execute_sync_cache = iblock_execute_sync_cache,
@@ -749,6 +788,10 @@ static struct se_subsystem_api iblock_template = {
         .show_configfs_dev_params = iblock_show_configfs_dev_params,
         .get_device_type = sbc_get_device_type,
         .get_blocks = iblock_get_blocks,
+        .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
+        .get_lbppbe = iblock_get_lbppbe,
+        .get_io_min = iblock_get_io_min,
+        .get_io_opt = iblock_get_io_opt,
         .get_write_cache = iblock_get_write_cache,
 };
 
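One nuance in iblock_get_alignment_offset_lbas() above: bdev_alignment_offset() reports the offset in bytes and can return -1 for a device flagged as misaligned, which the helper maps to 0. A standalone sketch of the same conversion, with hypothetical numbers (a device whose first physical-block boundary falls 3584 bytes in, with 512-byte logical blocks); this is only an illustration, not code from the patch:

#include <stdio.h>

/* Same byte-offset to LBA-offset conversion as iblock_get_alignment_offset_lbas();
 * the input values here are hypothetical, not taken from a real device. */
static unsigned int alignment_offset_lbas(int alignment_offset_bytes,
                                          unsigned int logical_block_size)
{
        if (alignment_offset_bytes == -1)       /* device flagged misaligned */
                return 0;
        return alignment_offset_bytes / logical_block_size;
}

int main(void)
{
        /* 3584 / 512 = 7: this is the value sbc_emulate_readcapacity_16()
         * encodes as the LOWEST ALIGNED LOGICAL BLOCK ADDRESS in the next hunk. */
        printf("%u\n", alignment_offset_lbas(3584, 512));  /* prints 7 */
        printf("%u\n", alignment_offset_lbas(-1, 512));    /* prints 0 */
        return 0;
}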
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 6c17295e8d7c..61a30f0d7583 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -105,12 +105,22 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
         buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
         buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
         buf[11] = dev->dev_attrib.block_size & 0xff;
+
+        if (dev->transport->get_lbppbe)
+                buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+
+        if (dev->transport->get_alignment_offset_lbas) {
+                u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+                buf[14] = (lalba >> 8) & 0x3f;
+                buf[15] = lalba & 0xff;
+        }
+
         /*
          * Set Thin Provisioning Enable bit following sbc3r22 in section
          * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
          */
         if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
-                buf[14] = 0x80;
+                buf[14] |= 0x80;
 
         rbuf = transport_kmap_data_sg(cmd);
         if (rbuf) {
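For reference, in the READ CAPACITY (16) parameter data the LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT lives in the low nibble of byte 13, and the LOWEST ALIGNED LOGICAL BLOCK ADDRESS spans the low 6 bits of byte 14 plus byte 15; the TPE/LBPME bit is bit 7 of byte 14, which is why the existing assignment becomes buf[14] |= 0x80 so it no longer clobbers the LALBA high bits just written. A rough initiator-side decode of those bytes would look like this (a sketch with illustrative names, not code from sd.c):

#include <stdint.h>

struct rc16_topology {
        unsigned int lbppbe;             /* logical blocks per physical block exponent */
        unsigned int lowest_aligned_lba;
        int lbpme;                       /* thin provisioning enabled (TPE/LBPME) */
};

/* 'data' is the READ CAPACITY (16) parameter data returned by the target */
static void decode_rc16_topology(const uint8_t *data, struct rc16_topology *t)
{
        t->lbppbe = data[13] & 0x0f;
        t->lowest_aligned_lba = ((data[14] & 0x3f) << 8) | data[15];
        t->lbpme = !!(data[14] & 0x80);
}

/* e.g. data[13] = 0x03, data[14] = 0x80, data[15] = 0x00 decodes to
 * 2^3 = 8 logical blocks per physical block (4K physical on 512 logical),
 * lowest aligned LBA 0, thin provisioning enabled. */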
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 074539558a54..f89a86f29ee3 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -452,6 +452,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
         struct se_device *dev = cmd->se_dev;
         u32 max_sectors;
         int have_tp = 0;
+        int opt, min;
 
         /*
          * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -475,7 +476,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
         /*
          * Set OPTIMAL TRANSFER LENGTH GRANULARITY
          */
-        put_unaligned_be16(1, &buf[6]);
+        if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
+                put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
+        else
+                put_unaligned_be16(1, &buf[6]);
 
         /*
          * Set MAXIMUM TRANSFER LENGTH
@@ -487,7 +491,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
         /*
          * Set OPTIMAL TRANSFER LENGTH
          */
-        put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
+        if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
+                put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
+        else
+                put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
 
         /*
          * Exit now if we don't support TP.
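Worked numbers for the two changed Block Limits fields, using hypothetical block-layer values (a RAID volume with a 64 KiB chunk and a 512 KiB full stripe, exported with 4096-byte blocks); the old code always reported granularity 1 and dev_attrib.optimal_sectors:

#include <stdio.h>

int main(void)
{
        /* Hypothetical values; a real backend gets these from get_io_min()
         * and get_io_opt(), which iblock wires to bdev_io_min()/bdev_io_opt(). */
        unsigned int block_size = 4096;
        unsigned int io_min     = 65536;
        unsigned int io_opt     = 524288;

        /* Same arithmetic as spc_emulate_evpd_b0() above, in logical blocks */
        printf("OPTIMAL TRANSFER LENGTH GRANULARITY = %u\n", io_min / block_size);
        printf("OPTIMAL TRANSFER LENGTH             = %u\n", io_opt / block_size);
        /* Prints 16 and 128 for this device, instead of the previous fixed
         * 1 and optimal_sectors. */
        return 0;
}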
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 5ebe21cd5d1c..39e0114d70c5 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -34,6 +34,11 @@ struct se_subsystem_api {
         sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
         u32 (*get_device_type)(struct se_device *);
         sector_t (*get_blocks)(struct se_device *);
+        sector_t (*get_alignment_offset_lbas)(struct se_device *);
+        /* lbppbe = logical blocks per physical block exponent. see SBC-3 */
+        unsigned int (*get_lbppbe)(struct se_device *);
+        unsigned int (*get_io_min)(struct se_device *);
+        unsigned int (*get_io_opt)(struct se_device *);
         unsigned char *(*get_sense_buffer)(struct se_cmd *);
         bool (*get_write_cache)(struct se_device *);
 };