diff options
author | Kent Overstreet <kmo@daterainc.com> | 2013-10-29 20:17:49 -0400 |
---|---|---|
committer | Kent Overstreet <kmo@daterainc.com> | 2013-11-24 01:33:55 -0500 |
commit | 1c3b13e64cf70d652fb04e32d13ae3e36810c2e4 (patch) | |
tree | ca3d3fd7d0c6d99fa2bc041ed51bac4aaa9a242a /drivers/md/dm.c | |
parent | 5341a6278bc5d10dbbb2ab6031b41d95c8db7a35 (diff) |
dm: Refactor for new bio cloning/splitting
We need to convert the dm code to the new bvec_iter primitives which
respect bi_bvec_done; they also allow us to drastically simplify dm's
bio splitting code.
Also, it's no longer necessary to save/restore the bvec array -
driver conversions for immutable bvecs are done, so drivers should never
be modifying it.
Also kill bio_sector_offset(), dm was the only user and it doesn't make
much sense anymore.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 174 |
1 file changed, 20 insertions, 154 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ccd064ea4fe6..44a2fa6814ce 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1155,7 +1155,6 @@ struct clone_info { | |||
1155 | struct dm_io *io; | 1155 | struct dm_io *io; |
1156 | sector_t sector; | 1156 | sector_t sector; |
1157 | sector_t sector_count; | 1157 | sector_t sector_count; |
1158 | unsigned short idx; | ||
1159 | }; | 1158 | }; |
1160 | 1159 | ||
1161 | static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) | 1160 | static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) |
@@ -1164,68 +1163,24 @@ static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) | |||
1164 | bio->bi_iter.bi_size = to_bytes(len); | 1163 | bio->bi_iter.bi_size = to_bytes(len); |
1165 | } | 1164 | } |
1166 | 1165 | ||
1167 | static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count) | ||
1168 | { | ||
1169 | bio->bi_iter.bi_idx = idx; | ||
1170 | bio->bi_vcnt = idx + bv_count; | ||
1171 | bio->bi_flags &= ~(1 << BIO_SEG_VALID); | ||
1172 | } | ||
1173 | |||
1174 | static void clone_bio_integrity(struct bio *bio, struct bio *clone, | ||
1175 | unsigned short idx, unsigned len, unsigned offset, | ||
1176 | unsigned trim) | ||
1177 | { | ||
1178 | if (!bio_integrity(bio)) | ||
1179 | return; | ||
1180 | |||
1181 | bio_integrity_clone(clone, bio, GFP_NOIO); | ||
1182 | |||
1183 | if (trim) | ||
1184 | bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len); | ||
1185 | } | ||
1186 | |||
1187 | /* | ||
1188 | * Creates a little bio that just does part of a bvec. | ||
1189 | */ | ||
1190 | static void clone_split_bio(struct dm_target_io *tio, struct bio *bio, | ||
1191 | sector_t sector, unsigned short idx, | ||
1192 | unsigned offset, unsigned len) | ||
1193 | { | ||
1194 | struct bio *clone = &tio->clone; | ||
1195 | struct bio_vec *bv = bio->bi_io_vec + idx; | ||
1196 | |||
1197 | *clone->bi_io_vec = *bv; | ||
1198 | |||
1199 | bio_setup_sector(clone, sector, len); | ||
1200 | |||
1201 | clone->bi_bdev = bio->bi_bdev; | ||
1202 | clone->bi_rw = bio->bi_rw; | ||
1203 | clone->bi_vcnt = 1; | ||
1204 | clone->bi_io_vec->bv_offset = offset; | ||
1205 | clone->bi_io_vec->bv_len = clone->bi_iter.bi_size; | ||
1206 | clone->bi_flags |= 1 << BIO_CLONED; | ||
1207 | |||
1208 | clone_bio_integrity(bio, clone, idx, len, offset, 1); | ||
1209 | } | ||
1210 | |||
1211 | /* | 1166 | /* |
1212 | * Creates a bio that consists of range of complete bvecs. | 1167 | * Creates a bio that consists of range of complete bvecs. |
1213 | */ | 1168 | */ |
1214 | static void clone_bio(struct dm_target_io *tio, struct bio *bio, | 1169 | static void clone_bio(struct dm_target_io *tio, struct bio *bio, |
1215 | sector_t sector, unsigned short idx, | 1170 | sector_t sector, unsigned len) |
1216 | unsigned short bv_count, unsigned len) | ||
1217 | { | 1171 | { |
1218 | struct bio *clone = &tio->clone; | 1172 | struct bio *clone = &tio->clone; |
1219 | unsigned trim = 0; | ||
1220 | 1173 | ||
1221 | __bio_clone(clone, bio); | 1174 | __bio_clone_fast(clone, bio); |
1222 | bio_setup_sector(clone, sector, len); | 1175 | |
1223 | bio_setup_bv(clone, idx, bv_count); | 1176 | if (bio_integrity(bio)) |
1177 | bio_integrity_clone(clone, bio, GFP_NOIO); | ||
1178 | |||
1179 | bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); | ||
1180 | clone->bi_iter.bi_size = to_bytes(len); | ||
1224 | 1181 | ||
1225 | if (idx != bio->bi_iter.bi_idx || | 1182 | if (bio_integrity(bio)) |
1226 | clone->bi_iter.bi_size < bio->bi_iter.bi_size) | 1183 | bio_integrity_trim(clone, 0, len); |
1227 | trim = 1; | ||
1228 | clone_bio_integrity(bio, clone, idx, len, 0, trim); | ||
1229 | } | 1184 | } |
1230 | 1185 | ||
1231 | static struct dm_target_io *alloc_tio(struct clone_info *ci, | 1186 | static struct dm_target_io *alloc_tio(struct clone_info *ci, |
@@ -1258,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci, | |||
1258 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush | 1213 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush |
1259 | * and discard, so no need for concern about wasted bvec allocations. | 1214 | * and discard, so no need for concern about wasted bvec allocations. |
1260 | */ | 1215 | */ |
1261 | __bio_clone(clone, ci->bio); | 1216 | __bio_clone_fast(clone, ci->bio); |
1262 | if (len) | 1217 | if (len) |
1263 | bio_setup_sector(clone, ci->sector, len); | 1218 | bio_setup_sector(clone, ci->sector, len); |
1264 | 1219 | ||
@@ -1287,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci) | |||
1287 | } | 1242 | } |
1288 | 1243 | ||
1289 | static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, | 1244 | static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, |
1290 | sector_t sector, int nr_iovecs, | 1245 | sector_t sector, unsigned len) |
1291 | unsigned short idx, unsigned short bv_count, | ||
1292 | unsigned offset, unsigned len, | ||
1293 | unsigned split_bvec) | ||
1294 | { | 1246 | { |
1295 | struct bio *bio = ci->bio; | 1247 | struct bio *bio = ci->bio; |
1296 | struct dm_target_io *tio; | 1248 | struct dm_target_io *tio; |
@@ -1304,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti | |||
1304 | num_target_bios = ti->num_write_bios(ti, bio); | 1256 | num_target_bios = ti->num_write_bios(ti, bio); |
1305 | 1257 | ||
1306 | for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { | 1258 | for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { |
1307 | tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr); | 1259 | tio = alloc_tio(ci, ti, 0, target_bio_nr); |
1308 | if (split_bvec) | 1260 | clone_bio(tio, bio, sector, len); |
1309 | clone_split_bio(tio, bio, sector, idx, offset, len); | ||
1310 | else | ||
1311 | clone_bio(tio, bio, sector, idx, bv_count, len); | ||
1312 | __map_bio(tio); | 1261 | __map_bio(tio); |
1313 | } | 1262 | } |
1314 | } | 1263 | } |
@@ -1380,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci) | |||
1380 | } | 1329 | } |
1381 | 1330 | ||
1382 | /* | 1331 | /* |
1383 | * Find maximum number of sectors / bvecs we can process with a single bio. | ||
1384 | */ | ||
1385 | static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx) | ||
1386 | { | ||
1387 | struct bio *bio = ci->bio; | ||
1388 | sector_t bv_len, total_len = 0; | ||
1389 | |||
1390 | for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) { | ||
1391 | bv_len = to_sector(bio->bi_io_vec[*idx].bv_len); | ||
1392 | |||
1393 | if (bv_len > max) | ||
1394 | break; | ||
1395 | |||
1396 | max -= bv_len; | ||
1397 | total_len += bv_len; | ||
1398 | } | ||
1399 | |||
1400 | return total_len; | ||
1401 | } | ||
1402 | |||
1403 | static int __split_bvec_across_targets(struct clone_info *ci, | ||
1404 | struct dm_target *ti, sector_t max) | ||
1405 | { | ||
1406 | struct bio *bio = ci->bio; | ||
1407 | struct bio_vec *bv = bio->bi_io_vec + ci->idx; | ||
1408 | sector_t remaining = to_sector(bv->bv_len); | ||
1409 | unsigned offset = 0; | ||
1410 | sector_t len; | ||
1411 | |||
1412 | do { | ||
1413 | if (offset) { | ||
1414 | ti = dm_table_find_target(ci->map, ci->sector); | ||
1415 | if (!dm_target_is_valid(ti)) | ||
1416 | return -EIO; | ||
1417 | |||
1418 | max = max_io_len(ci->sector, ti); | ||
1419 | } | ||
1420 | |||
1421 | len = min(remaining, max); | ||
1422 | |||
1423 | __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0, | ||
1424 | bv->bv_offset + offset, len, 1); | ||
1425 | |||
1426 | ci->sector += len; | ||
1427 | ci->sector_count -= len; | ||
1428 | offset += to_bytes(len); | ||
1429 | } while (remaining -= len); | ||
1430 | |||
1431 | ci->idx++; | ||
1432 | |||
1433 | return 0; | ||
1434 | } | ||
1435 | |||
1436 | /* | ||
1437 | * Select the correct strategy for processing a non-flush bio. | 1332 | * Select the correct strategy for processing a non-flush bio. |
1438 | */ | 1333 | */ |
1439 | static int __split_and_process_non_flush(struct clone_info *ci) | 1334 | static int __split_and_process_non_flush(struct clone_info *ci) |
1440 | { | 1335 | { |
1441 | struct bio *bio = ci->bio; | 1336 | struct bio *bio = ci->bio; |
1442 | struct dm_target *ti; | 1337 | struct dm_target *ti; |
1443 | sector_t len, max; | 1338 | unsigned len; |
1444 | int idx; | ||
1445 | 1339 | ||
1446 | if (unlikely(bio->bi_rw & REQ_DISCARD)) | 1340 | if (unlikely(bio->bi_rw & REQ_DISCARD)) |
1447 | return __send_discard(ci); | 1341 | return __send_discard(ci); |
@@ -1452,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci) | |||
1452 | if (!dm_target_is_valid(ti)) | 1346 | if (!dm_target_is_valid(ti)) |
1453 | return -EIO; | 1347 | return -EIO; |
1454 | 1348 | ||
1455 | max = max_io_len(ci->sector, ti); | 1349 | len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); |
1456 | |||
1457 | /* | ||
1458 | * Optimise for the simple case where we can do all of | ||
1459 | * the remaining io with a single clone. | ||
1460 | */ | ||
1461 | if (ci->sector_count <= max) { | ||
1462 | __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs, | ||
1463 | ci->idx, bio->bi_vcnt - ci->idx, 0, | ||
1464 | ci->sector_count, 0); | ||
1465 | ci->sector_count = 0; | ||
1466 | return 0; | ||
1467 | } | ||
1468 | |||
1469 | /* | ||
1470 | * There are some bvecs that don't span targets. | ||
1471 | * Do as many of these as possible. | ||
1472 | */ | ||
1473 | if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { | ||
1474 | len = __len_within_target(ci, max, &idx); | ||
1475 | |||
1476 | __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs, | ||
1477 | ci->idx, idx - ci->idx, 0, len, 0); | ||
1478 | 1350 | ||
1479 | ci->sector += len; | 1351 | __clone_and_map_data_bio(ci, ti, ci->sector, len); |
1480 | ci->sector_count -= len; | ||
1481 | ci->idx = idx; | ||
1482 | 1352 | ||
1483 | return 0; | 1353 | ci->sector += len; |
1484 | } | 1354 | ci->sector_count -= len; |
1485 | 1355 | ||
1486 | /* | 1356 | return 0; |
1487 | * Handle a bvec that must be split between two or more targets. | ||
1488 | */ | ||
1489 | return __split_bvec_across_targets(ci, ti, max); | ||
1490 | } | 1357 | } |
1491 | 1358 | ||
1492 | /* | 1359 | /* |
@@ -1512,7 +1379,6 @@ static void __split_and_process_bio(struct mapped_device *md, | |||
1512 | ci.io->md = md; | 1379 | ci.io->md = md; |
1513 | spin_lock_init(&ci.io->endio_lock); | 1380 | spin_lock_init(&ci.io->endio_lock); |
1514 | ci.sector = bio->bi_iter.bi_sector; | 1381 | ci.sector = bio->bi_iter.bi_sector; |
1515 | ci.idx = bio->bi_iter.bi_idx; | ||
1516 | 1382 | ||
1517 | start_io_acct(ci.io); | 1383 | start_io_acct(ci.io); |
1518 | 1384 | ||