author		Mikulas Patocka <mpatocka@redhat.com>	2014-03-14 18:41:24 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2014-06-03 13:44:06 -0400
commit		1dd40c3ecd9b8a4ab91dbf2e6ce10b82a3b5ae63 (patch)
tree		a1473eb0643c544ac9efa13911d6119a8078ed60
parent		e0d6609a5fe34463ae2fd48d846931f70de8b37b (diff)
dm: introduce dm_accept_partial_bio
The function dm_accept_partial_bio allows the target to specify how many
sectors of the current bio it will process.  If the target only wants to
accept part of the bio, it calls dm_accept_partial_bio and the DM core
sends the rest of the data in a subsequent bio.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
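For illustration, here is a minimal sketch of how a target's map routine could
use the new hook. The names CHUNK_SECTORS, struct my_dev and my_target_map are
hypothetical and exist only for this example; they are not part of the DM API
or of this patch.

	#include <linux/device-mapper.h>
	#include <linux/bio.h>

	#define CHUNK_SECTORS 128		/* hypothetical per-chunk limit, in sectors */

	struct my_dev {				/* hypothetical per-target context */
		struct dm_dev *dev;
		sector_t start;
	};

	static int my_target_map(struct dm_target *ti, struct bio *bio)
	{
		struct my_dev *mydev = ti->private;
		sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
		unsigned remaining = CHUNK_SECTORS - (offset & (CHUNK_SECTORS - 1));

		/*
		 * If the bio crosses a chunk boundary, accept only the sectors
		 * up to the boundary; the DM core resubmits the rest as a
		 * separate bio (flush bios carry no data, so they never take
		 * this branch).
		 */
		if (bio_sectors(bio) > remaining)
			dm_accept_partial_bio(bio, remaining);

		bio->bi_bdev = mydev->dev->bdev;
		bio->bi_iter.bi_sector = mydev->start + offset;

		return DM_MAPIO_REMAPPED;
	}

The key point is that the target never splits the bio itself: it only tells
the core how many sectors it accepts, and the core clones and resubmits the
remainder.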
-rw-r--r--	drivers/md/dm.c			59
-rw-r--r--	include/linux/device-mapper.h	 2
2 files changed, 53 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 368a20dd85c2..97940fc8c302 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1110,6 +1110,46 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 }
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
+/*
+ * A target may call dm_accept_partial_bio only from the map routine.  It is
+ * allowed for all bio types except REQ_FLUSH.
+ *
+ * dm_accept_partial_bio informs the dm that the target only wants to process
+ * additional n_sectors sectors of the bio and the rest of the data should be
+ * sent in a next bio.
+ *
+ * A diagram that explains the arithmetics:
+ * +--------------------+---------------+-------+
+ * |         1          |       2       |   3   |
+ * +--------------------+---------------+-------+
+ *
+ * <-------------- *tio->len_ptr --------------->
+ *                      <------- bi_size ------->
+ *                      <-- n_sectors -->
+ *
+ * Region 1 was already iterated over with bio_advance or similar function.
+ *	(it may be empty if the target doesn't use bio_advance)
+ * Region 2 is the remaining bio size that the target wants to process.
+ *	(it may be empty if region 1 is non-empty, although there is no reason
+ *	 to make it empty)
+ * The target requires that region 3 is to be sent in the next bio.
+ *
+ * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
+ * the partially processed part (the sum of regions 1+2) must be the same for all
+ * copies of the bio.
+ */
+void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+{
+	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+	BUG_ON(bio->bi_rw & REQ_FLUSH);
+	BUG_ON(bi_size > *tio->len_ptr);
+	BUG_ON(n_sectors > bi_size);
+	*tio->len_ptr -= bi_size - n_sectors;
+	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
+}
+EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+
 static void __map_bio(struct dm_target_io *tio)
 {
 	int r;
@@ -1200,11 +1240,13 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 
 static void __clone_and_map_simple_bio(struct clone_info *ci,
 				       struct dm_target *ti,
-				       unsigned target_bio_nr, unsigned len)
+				       unsigned target_bio_nr, unsigned *len)
 {
 	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
 	struct bio *clone = &tio->clone;
 
+	tio->len_ptr = len;
+
 	/*
 	 * Discard requests require the bio's inline iovecs be initialized.
 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
@@ -1212,13 +1254,13 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
 	 */
 	__bio_clone_fast(clone, ci->bio);
 	if (len)
-		bio_setup_sector(clone, ci->sector, len);
+		bio_setup_sector(clone, ci->sector, *len);
 
 	__map_bio(tio);
 }
 
 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
-				  unsigned num_bios, unsigned len)
+				  unsigned num_bios, unsigned *len)
 {
 	unsigned target_bio_nr;
 
@@ -1233,13 +1275,13 @@ static int __send_empty_flush(struct clone_info *ci)
 
 	BUG_ON(bio_has_data(ci->bio));
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		__send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
+		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
 
 	return 0;
 }
 
 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-				     sector_t sector, unsigned len)
+				     sector_t sector, unsigned *len)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target_io *tio;
@@ -1254,7 +1296,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
 
 	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
 		tio = alloc_tio(ci, ti, 0, target_bio_nr);
-		clone_bio(tio, bio, sector, len);
+		tio->len_ptr = len;
+		clone_bio(tio, bio, sector, *len);
 		__map_bio(tio);
 	}
 }
@@ -1306,7 +1349,7 @@ static int __send_changing_extent_only(struct clone_info *ci,
 		else
 			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
 
-		__send_duplicate_bios(ci, ti, num_bios, len);
+		__send_duplicate_bios(ci, ti, num_bios, &len);
 
 		ci->sector += len;
 	} while (ci->sector_count -= len);
@@ -1345,7 +1388,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 
 	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 
-	__clone_and_map_data_bio(ci, ti, ci->sector, len);
+	__clone_and_map_data_bio(ci, ti, ci->sector, &len);
 
 	ci->sector += len;
 	ci->sector_count -= len;
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 63da56ed9796..0adca299f238 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -291,6 +291,7 @@ struct dm_target_io {
 	struct dm_io *io;
 	struct dm_target *ti;
 	unsigned target_bio_nr;
+	unsigned *len_ptr;
 	struct bio clone;
 };
 
@@ -401,6 +402,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
 struct gendisk *dm_disk(struct mapped_device *md);
 int dm_suspended(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
+void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
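
For reference, a condensed sketch (not verbatim kernel code) of the core-side
arithmetic after this patch: the splitting code passes the address of len, the
target's map routine may shrink it through dm_accept_partial_bio, and the
caller then advances by the reduced value, so the remainder is cloned into the
next bio.

	/*
	 * Condensed, illustrative sketch of the splitting loop; the real code
	 * spreads this across __split_and_process_bio and
	 * __split_and_process_non_flush (names taken from the diff above).
	 */
	while (ci.sector_count) {
		unsigned len = min_t(sector_t, max_io_len(ci.sector, ti), ci.sector_count);

		/* The target's map routine may shrink len via dm_accept_partial_bio(). */
		__clone_and_map_data_bio(&ci, ti, ci.sector, &len);

		ci.sector += len;	/* advance only by the accepted sectors */
		ci.sector_count -= len;	/* the remainder is sent in the next clone */
	}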