author		Mike Snitzer <snitzer@redhat.com>	2012-07-27 10:08:00 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2012-07-27 10:08:00 -0400
commit		542f90381422676544382d4071ba44a2de90a0c1
tree		0eae6798ab2fdef68a0eb1ea5ac14f3cc3ffb740 /drivers/md
parent		1df05483d758ea43abc375869fbe06be506ba827
dm: support non power of two target max_io_len
Remove the restriction that limits a target's specified maximum incoming
I/O size to be a power of 2.

Rename this setting from 'split_io' to the less-ambiguous 'max_io_len'.
Change it from sector_t to uint32_t, which is plenty big enough, and
introduce a wrapper function dm_set_target_max_io_len() to set it.

Use sector_div() to process it now that it is not necessarily a power of 2.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
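For illustration only, the following is a small self-contained userspace C
sketch (not part of this patch) of the boundary computation that max_io_len()
performs once max_io_len no longer has to be a power of two: the non
power-of-two case falls back to a division where the kernel uses sector_div(),
while the power-of-two case keeps the cheap mask. Plain '%' stands in for
sector_div(), and remaining_to_boundary() is a made-up name for the example.

/*
 * Illustrative sketch only: how many sectors remain before the next
 * max_io_len boundary, for a given offset into the target.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static sector_t remaining_to_boundary(sector_t offset, uint32_t max_io_len)
{
	sector_t done;

	if (max_io_len & (max_io_len - 1))
		done = offset % max_io_len;		/* not a power of 2: kernel uses sector_div() */
	else
		done = offset & (max_io_len - 1);	/* power of 2: cheap mask, as before */

	return max_io_len - done;			/* sectors left before the next boundary */
}

int main(void)
{
	/* 3-sector chunks: offset 7 is 1 sector into a chunk, 2 remain */
	printf("%llu\n", (unsigned long long)remaining_to_boundary(7, 3));
	/* 4-sector chunks: offset 7 is 3 sectors in, 1 remains */
	printf("%llu\n", (unsigned long long)remaining_to_boundary(7, 4));
	return 0;
}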
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-raid.c	11
-rw-r--r--	drivers/md/dm-raid1.c	6
-rw-r--r--	drivers/md/dm-snap.c	27
-rw-r--r--	drivers/md/dm-stripe.c	5
-rw-r--r--	drivers/md/dm-thin.c	5
-rw-r--r--	drivers/md/dm.c	35
6 files changed, 60 insertions, 29 deletions
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 017c34d78d61..858a8b70811c 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -353,6 +353,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 {
 	unsigned i, rebuild_cnt = 0;
 	unsigned long value, region_size = 0;
+	sector_t max_io_len;
 	char *key;
 
 	/*
@@ -522,14 +523,12 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 		return -EINVAL;
 
 	if (rs->md.chunk_sectors)
-		rs->ti->split_io = rs->md.chunk_sectors;
+		max_io_len = rs->md.chunk_sectors;
 	else
-		rs->ti->split_io = region_size;
+		max_io_len = region_size;
 
-	if (rs->md.chunk_sectors)
-		rs->ti->split_io = rs->md.chunk_sectors;
-	else
-		rs->ti->split_io = region_size;
+	if (dm_set_target_max_io_len(rs->ti, max_io_len))
+		return -EINVAL;
 
 	/* Assume there are no metadata devices until the drives are parsed */
 	rs->md.persistent = 0;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index b58b7a33914a..819ccba65912 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1081,7 +1081,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	ti->private = ms;
-	ti->split_io = dm_rh_get_region_size(ms->rh);
+
+	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
+	if (r)
+		goto err_free_context;
+
 	ti->num_flush_requests = 1;
 	ti->num_discard_requests = 1;
 	ti->discard_zeroes_data_unsupported = 1;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a228c20e40b3..6c0f3e33923a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -691,7 +691,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
  * Return a minimum chunk size of all snapshots that have the specified origin.
  * Return zero if the origin has no snapshots.
  */
-static sector_t __minimum_chunk_size(struct origin *o)
+static uint32_t __minimum_chunk_size(struct origin *o)
 {
 	struct dm_snapshot *snap;
 	unsigned chunk_size = 0;
@@ -701,7 +701,7 @@ static sector_t __minimum_chunk_size(struct origin *o)
 		chunk_size = min_not_zero(chunk_size,
 					  snap->store->chunk_size);
 
-	return chunk_size;
+	return (uint32_t) chunk_size;
 }
 
 /*
@@ -1172,7 +1172,10 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		ti->error = "Chunk size not set";
 		goto bad_read_metadata;
 	}
-	ti->split_io = s->store->chunk_size;
+
+	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
+	if (r)
+		goto bad_read_metadata;
 
 	return 0;
 
@@ -1239,7 +1242,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
 	snap_dest->store->snap = snap_dest;
 	snap_src->store->snap = snap_src;
 
-	snap_dest->ti->split_io = snap_dest->store->chunk_size;
+	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
 	snap_dest->valid = snap_src->valid;
 
 	/*
@@ -1817,9 +1820,9 @@ static void snapshot_resume(struct dm_target *ti)
 	up_write(&s->lock);
 }
 
-static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
+static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
 {
-	sector_t min_chunksize;
+	uint32_t min_chunksize;
 
 	down_read(&_origins_lock);
 	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
@@ -1838,9 +1841,9 @@ static void snapshot_merge_resume(struct dm_target *ti)
 	snapshot_resume(ti);
 
 	/*
-	 * snapshot-merge acts as an origin, so set ti->split_io
+	 * snapshot-merge acts as an origin, so set ti->max_io_len
 	 */
-	ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
+	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
 
 	start_merge(s);
 }
@@ -2073,12 +2076,12 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
 	struct origin *o;
 
 	/*
-	 * The origin's __minimum_chunk_size() got stored in split_io
+	 * The origin's __minimum_chunk_size() got stored in max_io_len
 	 * by snapshot_merge_resume().
 	 */
 	down_read(&_origins_lock);
 	o = __lookup_origin(merging_snap->origin->bdev);
-	for (n = 0; n < size; n += merging_snap->ti->split_io)
+	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
 		if (__origin_write(&o->snapshots, sector + n, NULL) ==
 		    DM_MAPIO_SUBMITTED)
 			must_wait = 1;
@@ -2138,14 +2141,14 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
 }
 
 /*
- * Set the target "split_io" field to the minimum of all the snapshots'
+ * Set the target "max_io_len" field to the minimum of all the snapshots'
  * chunk sizes.
  */
 static void origin_resume(struct dm_target *ti)
 {
 	struct dm_dev *dev = ti->private;
 
-	ti->split_io = get_origin_minimum_chunksize(dev->bdev);
+	ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
 }
 
 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 6931bd18b615..992c9d4c3bd9 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -165,7 +165,10 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	else
 		sc->stripes_shift = __ffs(stripes);
 
-	ti->split_io = chunk_size;
+	r = dm_set_target_max_io_len(ti, chunk_size);
+	if (r)
+		return r;
+
 	ti->num_flush_requests = stripes;
 	ti->num_discard_requests = stripes;
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index e89f8e7d8a33..350bcf40485e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2628,7 +2628,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto bad_thin_open;
 	}
 
-	ti->split_io = tc->pool->sectors_per_block;
+	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
+	if (r)
+		goto bad_thin_open;
+
 	ti->num_flush_requests = 1;
 
 	/* In case the pool supports discards, pass them on. */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e24143cc2040..415c2803c0c9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -968,22 +968,41 @@ static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti
 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
 {
 	sector_t len = max_io_len_target_boundary(sector, ti);
+	sector_t offset, max_len;
 
 	/*
-	 * Does the target need to split even further ?
+	 * Does the target need to split even further?
 	 */
-	if (ti->split_io) {
-		sector_t boundary;
-		sector_t offset = dm_target_offset(ti, sector);
-		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
-			   - offset;
-		if (len > boundary)
-			len = boundary;
+	if (ti->max_io_len) {
+		offset = dm_target_offset(ti, sector);
+		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
+			max_len = sector_div(offset, ti->max_io_len);
+		else
+			max_len = offset & (ti->max_io_len - 1);
+		max_len = ti->max_io_len - max_len;
+
+		if (len > max_len)
+			len = max_len;
 	}
 
 	return len;
 }
 
+int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
+{
+	if (len > UINT_MAX) {
+		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
+		      (unsigned long long)len, UINT_MAX);
+		ti->error = "Maximum size of target IO is too large";
+		return -EINVAL;
+	}
+
+	ti->max_io_len = (uint32_t) len;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
+
 static void __map_bio(struct dm_target *ti, struct bio *clone,
 		      struct dm_target_io *tio)
 {