 drivers/md/dm.c | 84 ++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 58 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 77e6eff41cae..c72e4d5a9617 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -63,18 +63,6 @@ struct dm_io {
 };
 
 /*
- * For bio-based dm.
- * One of these is allocated per target within a bio. Hopefully
- * this will be simplified out one day.
- */
-struct dm_target_io {
-	struct dm_io *io;
-	struct dm_target *ti;
-	union map_info info;
-	struct bio clone;
-};
-
-/*
  * For request-based dm.
  * One of these is allocated per request.
  */
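
struct dm_target_io is not deleted outright: it is presumably relocated to include/linux/device-mapper.h so that targets can see its layout when reserving per-bio data (see the per_bio_data_size plumbing at the end of this diff). Judging from the fields the remaining hunks still reference (tio->info, tio->target_request_nr, tio->clone), the relocated definition would look roughly like:

struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
	unsigned target_request_nr;	/* added by this series */
	struct bio clone;		/* embedded clone bio */
};
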
@@ -657,7 +645,7 @@ static void clone_endio(struct bio *bio, int error)
 		error = -EIO;
 
 	if (endio) {
-		r = endio(tio->ti, bio, error, &tio->info);
+		r = endio(tio->ti, bio, error);
 		if (r < 0 || r == DM_ENDIO_REQUEUE)
 			/*
 			 * error and requeue request are handled
@@ -1016,7 +1004,7 @@ static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
 	 */
 	atomic_inc(&tio->io->io_count);
 	sector = clone->bi_sector;
-	r = ti->type->map(ti, clone, &tio->info);
+	r = ti->type->map(ti, clone);
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
 
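
Both callback changes above drop the union map_info * argument, which implies matching signature updates for every bio-based target. Under that assumption, the dm_map_fn and dm_endio_fn typedefs in include/linux/device-mapper.h become:

typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_endio_fn) (struct dm_target *ti, struct bio *bio, int error);
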
@@ -1111,6 +1099,7 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 	tio->io = ci->io;
 	tio->ti = ti;
 	memset(&tio->info, 0, sizeof(tio->info));
+	tio->target_request_nr = 0;
 
 	return tio;
 }
@@ -1121,7 +1110,7 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
 	struct bio *clone = &tio->clone;
 
-	tio->info.target_request_nr = request_nr;
+	tio->target_request_nr = request_nr;
 
 	/*
 	 * Discard requests require the bio's inline iovecs be initialized.
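
With target_request_nr now stored directly in struct dm_target_io rather than in union map_info, a target that needs the request number (for example, to tell apart the multiple clones issued per target) can no longer read it from the map context. A plausible accessor, assuming the clone bio stays embedded in dm_target_io, is:

static inline unsigned dm_bio_get_target_request_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_request_nr;
}
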
@@ -1174,7 +1163,28 @@ static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
 	ci->sector_count = 0;
 }
 
-static int __clone_and_map_discard(struct clone_info *ci)
+typedef unsigned (*get_num_requests_fn)(struct dm_target *ti);
+
+static unsigned get_num_discard_requests(struct dm_target *ti)
+{
+	return ti->num_discard_requests;
+}
+
+static unsigned get_num_write_same_requests(struct dm_target *ti)
+{
+	return ti->num_write_same_requests;
+}
+
+typedef bool (*is_split_required_fn)(struct dm_target *ti);
+
+static bool is_split_required_for_discard(struct dm_target *ti)
+{
+	return ti->split_discard_requests;
+}
+
+static int __clone_and_map_changing_extent_only(struct clone_info *ci,
+						get_num_requests_fn get_num_requests,
+						is_split_required_fn is_split_required)
 {
 	struct dm_target *ti;
 	sector_t len;
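
The discard-specific helper is generalized into __clone_and_map_changing_extent_only(), parameterized by two small callbacks: one returning how many requests the target wants per extent, and one saying whether the extent must be split at the target's io granularity. Wiring up a further extent-only operation would then need only a pair of trivial functions. A hypothetical sketch ("foo" is not part of this patch):

/* Hypothetical illustration only. */
static unsigned get_num_foo_requests(struct dm_target *ti)
{
	return ti->num_foo_requests;	/* assumed per-target field */
}

static int __clone_and_map_foo(struct clone_info *ci)
{
	return __clone_and_map_changing_extent_only(ci, get_num_foo_requests, NULL);
}
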
@@ -1185,15 +1195,15 @@ static int __clone_and_map_discard(struct clone_info *ci)
 		return -EIO;
 
 	/*
-	 * Even though the device advertised discard support,
-	 * that does not mean every target supports it, and
+	 * Even though the device advertised support for this type of
+	 * request, that does not mean every target supports it, and
 	 * reconfiguration might also have changed that since the
 	 * check was performed.
 	 */
-	if (!ti->num_discard_requests)
+	if (!get_num_requests || !get_num_requests(ti))
 		return -EOPNOTSUPP;
 
-	if (!ti->split_discard_requests)
+	if (is_split_required && !is_split_required(ti))
 		len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
 	else
 		len = min(ci->sector_count, max_io_len(ci->sector, ti));
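
Note the semantics of a NULL is_split_required: the condition is_split_required && !is_split_required(ti) is then always false, so the helper takes the max_io_len() branch and splits at the target's io granularity as well as at target boundaries. Discard keeps its opt-in behaviour via split_discard_requests; the WRITE SAME wrapper below passes NULL and is therefore always split.
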
@@ -1206,6 +1216,17 @@ static int __clone_and_map_discard(struct clone_info *ci)
 	return 0;
 }
 
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+	return __clone_and_map_changing_extent_only(ci, get_num_discard_requests,
+						    is_split_required_for_discard);
+}
+
+static int __clone_and_map_write_same(struct clone_info *ci)
+{
+	return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL);
+}
+
 static int __clone_and_map(struct clone_info *ci)
 {
 	struct bio *bio = ci->bio;
@@ -1215,6 +1236,8 @@ static int __clone_and_map(struct clone_info *ci)
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD))
 		return __clone_and_map_discard(ci);
+	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+		return __clone_and_map_write_same(ci);
 
 	ti = dm_table_find_target(ci->map, ci->sector);
 	if (!dm_target_is_valid(ti))
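
__clone_and_map() now routes WRITE SAME bios through the new wrapper before the normal per-sector mapping logic, mirroring the existing REQ_DISCARD special case. Such bios would typically originate from the block layer's WRITE SAME helper; a minimal sketch of an upper-layer caller (assuming blkdev_issue_write_same() from the parallel block-layer series) might be:

/* Sketch: replicate the zero page across a sector range via WRITE SAME. */
int write_same_zero(struct block_device *bdev, sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}
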
@@ -1946,13 +1969,20 @@ static void free_dev(struct mapped_device *md)
 
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
-	struct dm_md_mempools *p;
+	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-	if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs)
-		/* the md already has necessary mempools */
+	if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
+		/*
+		 * The md already has the necessary mempools. Reload just
+		 * the bioset because front_pad may have changed when a
+		 * different table was loaded.
+		 */
+		bioset_free(md->bs);
+		md->bs = p->bs;
+		p->bs = NULL;
 		goto out;
+	}
 
-	p = dm_table_get_md_mempools(t);
 	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
 
 	md->io_pool = p->io_pool;
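
Only the bioset is rebuilt on table reload because its front_pad is the one piece sized from the incoming table: it must cover the largest per-bio data size any target in the new table requested. A target would presumably opt in from its constructor, along these lines (assuming a matching ti->per_bio_data_size field; the target shown is illustrative):

struct my_pbd {
	sector_t orig_sector;	/* example payload */
};

static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	ti->per_bio_data_size = sizeof(struct my_pbd);	/* assumed field */
	return 0;
}
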
@@ -2711,7 +2741,7 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
 {
 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
 	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
@@ -2719,6 +2749,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 	if (!pools)
 		return NULL;
 
+	per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+
 	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
 		mempool_create_slab_pool(MIN_IOS, _io_cache) :
 		mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
@@ -2734,7 +2766,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 
 	pools->bs = (type == DM_TYPE_BIO_BASED) ?
 		bioset_create(pool_size,
-			      offsetof(struct dm_target_io, clone)) :
+			      per_bio_data_size + offsetof(struct dm_target_io, clone)) :
 		bioset_create(pool_size,
 			      offsetof(struct dm_rq_clone_bio_info, clone));
 	if (!pools->bs)
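
The front_pad this establishes for bio-based devices lays each clone allocation out as [per-bio data][struct dm_target_io], with the bio handed to the target being the clone member at the end of dm_target_io. The roundup() to __alignof__(struct dm_target_io) above keeps dm_target_io correctly aligned regardless of the data size a target asked for. Under that layout, reaching the per-bio data from a bio is simple pointer arithmetic; a sketch of the accessor (assumed to live in include/linux/device-mapper.h):

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	/* Step back over the dm_target_io head, then over the data itself. */
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}
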