aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/dm-thin.c
diff options
context:
space:
mode:
authorJoe Thornber <ejt@redhat.com>2014-10-15 09:46:58 -0400
committerMike Snitzer <snitzer@redhat.com>2014-11-10 15:25:28 -0500
commit23ca2bb6c6104db9d4cff4e33cbabee303c49d4d (patch)
tree8864d046b4f97795c263705b8795d756c3a50131 /drivers/md/dm-thin.c
parent2d759a46b4d65e1392843cf9df7101897af87008 (diff)
dm thin: direct dispatch when breaking sharing
This use of direct submission in process_shared_bio() reduces latency for submitting bios in the shared cell by avoiding adding those bios to the deferred list and waiting for the next iteration of the worker. Signed-off-by: Joe Thornber <ejt@redhat.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--drivers/md/dm-thin.c70
1 file changed, 57 insertions(+), 13 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5036d4b3f368..3f3a66124d46 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1390,11 +1390,53 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1390 } 1390 }
1391} 1391}
1392 1392
1393static void __remap_and_issue_shared_cell(void *context,
1394 struct dm_bio_prison_cell *cell)
1395{
1396 struct remap_info *info = context;
1397 struct bio *bio;
1398
1399 while ((bio = bio_list_pop(&cell->bios))) {
1400 if ((bio_data_dir(bio) == WRITE) ||
1401 (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
1402 bio_list_add(&info->defer_bios, bio);
1403 else {
1404 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
1405
1406 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1407 inc_all_io_entry(info->tc->pool, bio);
1408 bio_list_add(&info->issue_bios, bio);
1409 }
1410 }
1411}
1412
1413static void remap_and_issue_shared_cell(struct thin_c *tc,
1414 struct dm_bio_prison_cell *cell,
1415 dm_block_t block)
1416{
1417 struct bio *bio;
1418 struct remap_info info;
1419
1420 info.tc = tc;
1421 bio_list_init(&info.defer_bios);
1422 bio_list_init(&info.issue_bios);
1423
1424 cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1425 &info, cell);
1426
1427 while ((bio = bio_list_pop(&info.defer_bios)))
1428 thin_defer_bio(tc, bio);
1429
1430 while ((bio = bio_list_pop(&info.issue_bios)))
1431 remap_and_issue(tc, bio, block);
1432}
1433
1393static void process_shared_bio(struct thin_c *tc, struct bio *bio, 1434static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1394 dm_block_t block, 1435 dm_block_t block,
1395 struct dm_thin_lookup_result *lookup_result) 1436 struct dm_thin_lookup_result *lookup_result,
1437 struct dm_bio_prison_cell *virt_cell)
1396{ 1438{
1397 struct dm_bio_prison_cell *cell; 1439 struct dm_bio_prison_cell *data_cell;
1398 struct pool *pool = tc->pool; 1440 struct pool *pool = tc->pool;
1399 struct dm_cell_key key; 1441 struct dm_cell_key key;
1400 1442
@@ -1403,19 +1445,23 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1403 * of being broken so we have nothing further to do here. 1445 * of being broken so we have nothing further to do here.
1404 */ 1446 */
1405 build_data_key(tc->td, lookup_result->block, &key); 1447 build_data_key(tc->td, lookup_result->block, &key);
1406 if (bio_detain(pool, &key, bio, &cell)) 1448 if (bio_detain(pool, &key, bio, &data_cell)) {
1449 cell_defer_no_holder(tc, virt_cell);
1407 return; 1450 return;
1451 }
1408 1452
1409 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) 1453 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1410 break_sharing(tc, bio, block, &key, lookup_result, cell); 1454 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1411 else { 1455 cell_defer_no_holder(tc, virt_cell);
1456 } else {
1412 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1457 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1413 1458
1414 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); 1459 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1415 inc_all_io_entry(pool, bio); 1460 inc_all_io_entry(pool, bio);
1416 cell_defer_no_holder(tc, cell);
1417
1418 remap_and_issue(tc, bio, lookup_result->block); 1461 remap_and_issue(tc, bio, lookup_result->block);
1462
1463 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1464 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1419 } 1465 }
1420} 1466}
1421 1467
@@ -1484,11 +1530,9 @@ static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1484 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1530 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1485 switch (r) { 1531 switch (r) {
1486 case 0: 1532 case 0:
1487 if (lookup_result.shared) { 1533 if (lookup_result.shared)
1488 process_shared_bio(tc, bio, block, &lookup_result); 1534 process_shared_bio(tc, bio, block, &lookup_result, cell);
1489 // FIXME: we can't remap because we're waiting on a commit 1535 else {
1490 cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
1491 } else {
1492 inc_all_io_entry(pool, bio); 1536 inc_all_io_entry(pool, bio);
1493 remap_and_issue(tc, bio, lookup_result.block); 1537 remap_and_issue(tc, bio, lookup_result.block);
1494 inc_remap_and_issue_cell(tc, cell, lookup_result.block); 1538 inc_remap_and_issue_cell(tc, cell, lookup_result.block);