author    Kent Overstreet <kmo@daterainc.com>    2013-10-29 20:17:49 -0400
committer Kent Overstreet <kmo@daterainc.com>    2013-11-24 01:33:55 -0500
commit    1c3b13e64cf70d652fb04e32d13ae3e36810c2e4 (patch)
tree      ca3d3fd7d0c6d99fa2bc041ed51bac4aaa9a242a
parent    5341a6278bc5d10dbbb2ab6031b41d95c8db7a35 (diff)
dm: Refactor for new bio cloning/splitting
We need to convert the dm code to the new bvec_iter primitives which
respect bi_bvec_done; they also allow us to drastically simplify dm's
bio splitting code.

It is also no longer necessary to save/restore the bvec array - driver
conversions for immutable bvecs are done, so drivers should never be
modifying it.

Also kill bio_sector_offset(); dm was the only user, and it no longer
makes much sense.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--  drivers/md/dm-bio-record.h |  25
-rw-r--r--  drivers/md/dm.c            | 174
-rw-r--r--  fs/bio.c                   |  72
-rw-r--r--  include/linux/bio.h        |   2
4 files changed, 20 insertions(+), 253 deletions(-)
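For context, the splitting scheme this patch moves dm to reduces to a handful of bvec_iter operations: clone the bio so it shares the parent's biovec, advance the clone's iterator to the desired start sector, and cap its size. The sketch below only distills the pattern used by the new clone_bio() in the patch; clone_and_trim is an illustrative name, not a helper added here, and integrity handling is omitted.

/*
 * Sketch only: narrow 'clone' to the range [sector, sector + len) of 'bio'
 * using the new bvec_iter primitives (which honour bi_bvec_done), without
 * copying or modifying the parent's bvec array.
 */
static void clone_and_trim(struct bio *clone, struct bio *bio,
			   sector_t sector, unsigned len)
{
	/* Share bio's biovec; only the clone's bvec_iter is private. */
	__bio_clone_fast(clone, bio);

	/* Skip forward to the requested start sector. */
	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));

	/* Cap the clone at 'len' sectors of data. */
	clone->bi_iter.bi_size = to_bytes(len);
}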
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 4f46e8e528de..dd3646111561 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -17,49 +17,24 @@
  * original bio state.
  */
 
-struct dm_bio_vec_details {
-#if PAGE_SIZE < 65536
-	__u16 bv_len;
-	__u16 bv_offset;
-#else
-	unsigned bv_len;
-	unsigned bv_offset;
-#endif
-};
-
 struct dm_bio_details {
 	struct block_device *bi_bdev;
 	unsigned long bi_flags;
 	struct bvec_iter bi_iter;
-	struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
 };
 
 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 {
-	unsigned i;
-
 	bd->bi_bdev = bio->bi_bdev;
 	bd->bi_flags = bio->bi_flags;
 	bd->bi_iter = bio->bi_iter;
-
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
-		bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
-	}
 }
 
 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 {
-	unsigned i;
-
 	bio->bi_bdev = bd->bi_bdev;
 	bio->bi_flags = bd->bi_flags;
 	bio->bi_iter = bd->bi_iter;
-
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
-		bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
-	}
 }
 
 #endif
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ccd064ea4fe6..44a2fa6814ce 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1155,7 +1155,6 @@ struct clone_info {
 	struct dm_io *io;
 	sector_t sector;
 	sector_t sector_count;
-	unsigned short idx;
 };
 
 static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
@@ -1164,68 +1163,24 @@ static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
 	bio->bi_iter.bi_size = to_bytes(len);
 }
 
-static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
-{
-	bio->bi_iter.bi_idx = idx;
-	bio->bi_vcnt = idx + bv_count;
-	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-}
-
-static void clone_bio_integrity(struct bio *bio, struct bio *clone,
-				unsigned short idx, unsigned len, unsigned offset,
-				unsigned trim)
-{
-	if (!bio_integrity(bio))
-		return;
-
-	bio_integrity_clone(clone, bio, GFP_NOIO);
-
-	if (trim)
-		bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
-}
-
-/*
- * Creates a little bio that just does part of a bvec.
- */
-static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
-			    sector_t sector, unsigned short idx,
-			    unsigned offset, unsigned len)
-{
-	struct bio *clone = &tio->clone;
-	struct bio_vec *bv = bio->bi_io_vec + idx;
-
-	*clone->bi_io_vec = *bv;
-
-	bio_setup_sector(clone, sector, len);
-
-	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw;
-	clone->bi_vcnt = 1;
-	clone->bi_io_vec->bv_offset = offset;
-	clone->bi_io_vec->bv_len = clone->bi_iter.bi_size;
-	clone->bi_flags |= 1 << BIO_CLONED;
-
-	clone_bio_integrity(bio, clone, idx, len, offset, 1);
-}
-
 /*
  * Creates a bio that consists of range of complete bvecs.
  */
 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
-		      sector_t sector, unsigned short idx,
-		      unsigned short bv_count, unsigned len)
+		      sector_t sector, unsigned len)
 {
 	struct bio *clone = &tio->clone;
-	unsigned trim = 0;
 
-	__bio_clone(clone, bio);
-	bio_setup_sector(clone, sector, len);
-	bio_setup_bv(clone, idx, bv_count);
+	__bio_clone_fast(clone, bio);
+
+	if (bio_integrity(bio))
+		bio_integrity_clone(clone, bio, GFP_NOIO);
+
+	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+	clone->bi_iter.bi_size = to_bytes(len);
 
-	if (idx != bio->bi_iter.bi_idx ||
-	    clone->bi_iter.bi_size < bio->bi_iter.bi_size)
-		trim = 1;
-	clone_bio_integrity(bio, clone, idx, len, 0, trim);
+	if (bio_integrity(bio))
+		bio_integrity_trim(clone, 0, len);
 }
 
 static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1258,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
 	 * and discard, so no need for concern about wasted bvec allocations.
 	 */
-	__bio_clone(clone, ci->bio);
+	__bio_clone_fast(clone, ci->bio);
 	if (len)
 		bio_setup_sector(clone, ci->sector, len);
 
@@ -1287,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
 }
 
 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-				     sector_t sector, int nr_iovecs,
-				     unsigned short idx, unsigned short bv_count,
-				     unsigned offset, unsigned len,
-				     unsigned split_bvec)
+				     sector_t sector, unsigned len)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target_io *tio;
@@ -1304,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
 		num_target_bios = ti->num_write_bios(ti, bio);
 
 	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
-		tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
-		if (split_bvec)
-			clone_split_bio(tio, bio, sector, idx, offset, len);
-		else
-			clone_bio(tio, bio, sector, idx, bv_count, len);
+		tio = alloc_tio(ci, ti, 0, target_bio_nr);
+		clone_bio(tio, bio, sector, len);
 		__map_bio(tio);
 	}
 }
@@ -1380,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci)
 }
 
 /*
- * Find maximum number of sectors / bvecs we can process with a single bio.
- */
-static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
-{
-	struct bio *bio = ci->bio;
-	sector_t bv_len, total_len = 0;
-
-	for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
-		bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
-
-		if (bv_len > max)
-			break;
-
-		max -= bv_len;
-		total_len += bv_len;
-	}
-
-	return total_len;
-}
-
-static int __split_bvec_across_targets(struct clone_info *ci,
-				       struct dm_target *ti, sector_t max)
-{
-	struct bio *bio = ci->bio;
-	struct bio_vec *bv = bio->bi_io_vec + ci->idx;
-	sector_t remaining = to_sector(bv->bv_len);
-	unsigned offset = 0;
-	sector_t len;
-
-	do {
-		if (offset) {
-			ti = dm_table_find_target(ci->map, ci->sector);
-			if (!dm_target_is_valid(ti))
-				return -EIO;
-
-			max = max_io_len(ci->sector, ti);
-		}
-
-		len = min(remaining, max);
-
-		__clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
-					 bv->bv_offset + offset, len, 1);
-
-		ci->sector += len;
-		ci->sector_count -= len;
-		offset += to_bytes(len);
-	} while (remaining -= len);
-
-	ci->idx++;
-
-	return 0;
-}
-
-/*
  * Select the correct strategy for processing a non-flush bio.
  */
 static int __split_and_process_non_flush(struct clone_info *ci)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target *ti;
-	sector_t len, max;
-	int idx;
+	unsigned len;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD))
 		return __send_discard(ci);
@@ -1452,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 	if (!dm_target_is_valid(ti))
 		return -EIO;
 
-	max = max_io_len(ci->sector, ti);
-
-	/*
-	 * Optimise for the simple case where we can do all of
-	 * the remaining io with a single clone.
-	 */
-	if (ci->sector_count <= max) {
-		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-					 ci->idx, bio->bi_vcnt - ci->idx, 0,
-					 ci->sector_count, 0);
-		ci->sector_count = 0;
-		return 0;
-	}
-
-	/*
-	 * There are some bvecs that don't span targets.
-	 * Do as many of these as possible.
-	 */
-	if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
-		len = __len_within_target(ci, max, &idx);
-
-		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-					 ci->idx, idx - ci->idx, 0, len, 0);
+	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 
-		ci->sector += len;
-		ci->sector_count -= len;
-		ci->idx = idx;
+	__clone_and_map_data_bio(ci, ti, ci->sector, len);
 
-		return 0;
-	}
+	ci->sector += len;
+	ci->sector_count -= len;
 
-	/*
-	 * Handle a bvec that must be split between two or more targets.
-	 */
-	return __split_bvec_across_targets(ci, ti, max);
+	return 0;
 }
 
 /*
@@ -1512,7 +1379,6 @@ static void __split_and_process_bio(struct mapped_device *md,
 	ci.io->md = md;
 	spin_lock_init(&ci.io->endio_lock);
 	ci.sector = bio->bi_iter.bi_sector;
-	ci.idx = bio->bi_iter.bi_idx;
 
 	start_io_acct(ci.io);
 
diff --git a/fs/bio.c b/fs/bio.c
index 00dc1893c6ee..6e42b68ab0ac 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -515,40 +515,6 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 EXPORT_SYMBOL(bio_phys_segments);
 
 /**
- * __bio_clone - clone a bio
- * @bio: destination bio
- * @bio_src: bio to clone
- *
- *	Clone a &bio. Caller will own the returned bio, but not
- *	the actual data it points to. Reference count of returned
- *	bio will be one.
- */
-void __bio_clone(struct bio *bio, struct bio *bio_src)
-{
-	if (bio_is_rw(bio_src)) {
-		struct bio_vec bv;
-		struct bvec_iter iter;
-
-		bio_for_each_segment(bv, bio_src, iter)
-			bio->bi_io_vec[bio->bi_vcnt++] = bv;
-	} else if (bio_has_data(bio_src)) {
-		memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
-		       bio_src->bi_max_vecs * sizeof(struct bio_vec));
-		bio->bi_vcnt = bio_src->bi_vcnt;
-	}
-
-	/*
-	 * most users will be overriding ->bi_bdev with a new target,
-	 * so we don't set nor calculate new physical/hw segment counts here
-	 */
-	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_flags |= 1 << BIO_CLONED;
-	bio->bi_rw = bio_src->bi_rw;
-	bio->bi_iter = bio_src->bi_iter;
-}
-EXPORT_SYMBOL(__bio_clone);
-
-/**
  * __bio_clone_fast - clone a bio that shares the original bio's biovec
  * @bio: destination bio
  * @bio_src: bio to clone
@@ -1921,44 +1887,6 @@ void bio_trim(struct bio *bio, int offset, int size)
 }
 EXPORT_SYMBOL_GPL(bio_trim);
 
-/**
- * bio_sector_offset - Find hardware sector offset in bio
- * @bio: bio to inspect
- * @index: bio_vec index
- * @offset: offset in bv_page
- *
- * Return the number of hardware sectors between beginning of bio
- * and an end point indicated by a bio_vec index and an offset
- * within that vector's page.
- */
-sector_t bio_sector_offset(struct bio *bio, unsigned short index,
-			   unsigned int offset)
-{
-	unsigned int sector_sz;
-	struct bio_vec *bv;
-	sector_t sectors;
-	int i;
-
-	sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
-	sectors = 0;
-
-	if (index >= bio->bi_iter.bi_idx)
-		index = bio->bi_vcnt - 1;
-
-	bio_for_each_segment_all(bv, bio, i) {
-		if (i == index) {
-			if (offset > bv->bv_offset)
-				sectors += (offset - bv->bv_offset) / sector_sz;
-			break;
-		}
-
-		sectors += bv->bv_len / sector_sz;
-	}
-
-	return sectors;
-}
-EXPORT_SYMBOL(bio_sector_offset);
-
 /*
  * create memory pools for biovec's in a bio_set.
  * use the global biovec slabs created for general use.
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1f83f4a3083e..0c32a45a419c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -330,7 +330,6 @@ extern void bio_put(struct bio *);
 
 extern void __bio_clone_fast(struct bio *, struct bio *);
 extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern void __bio_clone(struct bio *, struct bio *);
 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 
 extern struct bio_set *fs_bio_set;
@@ -370,7 +369,6 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
 			   unsigned int, unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
-extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
 				unsigned long, unsigned int, int, gfp_t);
 struct sg_iovec;