aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDamien Le Moal <damien.lemoal@wdc.com>2017-10-28 03:39:34 -0400
committerMike Snitzer <snitzer@redhat.com>2017-11-10 15:44:53 -0500
commit114e025968b5990ad0b57bf60697ea64ee206aac (patch)
tree443eee16ccd2842c0abccd3da5cd0717dd2c5421
parentfbc61291d7da41ec19f339311297f59213165227 (diff)
dm zoned: ignore last smaller runt zone
The SCSI layer allows ZBC drives to have a smaller last runt zone. For such a device, specifying the entire capacity for a dm-zoned target table entry fails because the specified capacity is not aligned on a device zone size indicated in the request queue structure of the device. Fix this problem by ignoring the last runt zone in the entry length when setting up the dm-zoned target (ctr method) and when iterating table entries of the target (iterate_devices method). This allows dm-zoned users to still easily setup a target using the entire device capacity (as mandated by dm-zoned) or the aligned capacity excluding the last runt zone. While at it, replace direct references to the device queue chunk_sectors limit with calls to the accessor blk_queue_zone_sectors(). Reported-by: Peter Desnoyers <pjd@ccs.neu.edu> Cc: stable@vger.kernel.org Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--drivers/md/dm-zoned-target.c13
1 file changed, 9 insertions, 4 deletions
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index b87c1741da4b..6d7bda6f8190 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
660 struct dmz_target *dmz = ti->private; 660 struct dmz_target *dmz = ti->private;
661 struct request_queue *q; 661 struct request_queue *q;
662 struct dmz_dev *dev; 662 struct dmz_dev *dev;
663 sector_t aligned_capacity;
663 int ret; 664 int ret;
664 665
665 /* Get the target device */ 666 /* Get the target device */
@@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
685 goto err; 686 goto err;
686 } 687 }
687 688
689 q = bdev_get_queue(dev->bdev);
688 dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; 690 dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
689 if (ti->begin || (ti->len != dev->capacity)) { 691 aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
692 if (ti->begin ||
693 ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
690 ti->error = "Partial mapping not supported"; 694 ti->error = "Partial mapping not supported";
691 ret = -EINVAL; 695 ret = -EINVAL;
692 goto err; 696 goto err;
693 } 697 }
694 698
695 q = bdev_get_queue(dev->bdev); 699 dev->zone_nr_sectors = blk_queue_zone_sectors(q);
696 dev->zone_nr_sectors = q->limits.chunk_sectors;
697 dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors); 700 dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
698 701
699 dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors); 702 dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
@@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti,
929 iterate_devices_callout_fn fn, void *data) 932 iterate_devices_callout_fn fn, void *data)
930{ 933{
931 struct dmz_target *dmz = ti->private; 934 struct dmz_target *dmz = ti->private;
935 struct dmz_dev *dev = dmz->dev;
936 sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
932 937
933 return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data); 938 return fn(ti, dmz->ddev, 0, capacity, data);
934} 939}
935 940
936static struct target_type dmz_type = { 941static struct target_type dmz_type = {