author	Joe Thornber <ejt@redhat.com>	2014-10-10 08:43:14 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2014-11-10 15:25:28 -0500
commit	a374bb217b449a00eb96d0584bb833a8b62b672a (patch)
tree	df8b87e504ad076d92bb691e3ea966b29957ef4c /drivers
parent	452d7a620dc38cb525c403aa4b445028da359268 (diff)
dm thin: defer whole cells rather than individual bios
This avoids dropping the cell, so increases the probability that other bios
will collect within the cell, rather than being passed individually to the
worker.

Also add required process_cell and process_discard_cell error handling
wrappers and set associated pool-mode function pointers accordingly.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
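For readers skimming the patch, the batching pattern is easier to see in isolation. The following is a minimal, self-contained C sketch of the idea only, not the kernel code; struct request, struct cell, cell_add_waiter() and the simplified process_cell() here are illustrative stand-ins for the bio, dm_bio_prison_cell, thin_defer_cell() and pool->process_cell() changes in the diff below.

/*
 * Minimal stand-alone sketch of the "defer whole cells" idea, NOT the
 * kernel implementation.  All names (struct request, struct cell,
 * cell_add_waiter, process_cell) are simplified stand-ins.
 */
#include <stdio.h>

struct request {
	int id;
	struct request *next;
};

struct cell {
	unsigned long long block;	/* block the requests are queued against */
	struct request *holder;		/* first request to reach the block */
	struct request *waiters;	/* later requests that collected in the cell */
};

/* A later request for the same block joins the cell instead of being
 * handed to the worker on its own. */
static void cell_add_waiter(struct cell *c, struct request *rq)
{
	rq->next = c->waiters;
	c->waiters = rq;
}

/* The worker receives the whole cell: one metadata lookup for the holder,
 * then every waiter is remapped using the same result. */
static void process_cell(struct cell *c)
{
	printf("block %llu: looked up mapping once for holder %d\n",
	       c->block, c->holder->id);
	for (struct request *rq = c->waiters; rq; rq = rq->next)
		printf("  waiter %d remapped with the same result\n", rq->id);
}

int main(void)
{
	struct request r0 = { .id = 0 }, r1 = { .id = 1 }, r2 = { .id = 2 };
	struct cell c = { .block = 42, .holder = &r0 };

	cell_add_waiter(&c, &r1);
	cell_add_waiter(&c, &r2);
	process_cell(&c);	/* one hand-off to the worker, three requests served */
	return 0;
}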
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-bio-prison.h	  1
-rw-r--r--	drivers/md/dm-thin.c		254
2 files changed, 208 insertions(+), 47 deletions(-)
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 997a43960e77..c0cddb118582 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -35,6 +35,7 @@ struct dm_cell_key {
  * themselves.
  */
 struct dm_bio_prison_cell {
+	struct list_head user_list;	/* for client use */
 	struct rb_node node;
 
 	struct dm_cell_key key;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 52562710f6a0..912d7f4d89d1 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -202,6 +202,7 @@ struct pool_features {
 
 struct thin_c;
 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
+typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
 
 struct pool {
@@ -246,6 +247,9 @@ struct pool {
 	process_bio_fn process_bio;
 	process_bio_fn process_discard;
 
+	process_cell_fn process_cell;
+	process_cell_fn process_discard_cell;
+
 	process_mapping_fn process_prepared_mapping;
 	process_mapping_fn process_prepared_discard;
 };
@@ -282,6 +286,7 @@ struct thin_c {
 	struct dm_thin_device *td;
 	bool requeue_mode:1;
 	spinlock_t lock;
+	struct list_head deferred_cells;
 	struct bio_list deferred_bio_list;
 	struct bio_list retry_on_resume_list;
 	struct rb_root sort_bio_list; /* sorted list of deferred bios */
@@ -346,19 +351,6 @@ static void cell_release_no_holder(struct pool *pool,
 	dm_bio_prison_free_cell(pool->prison, cell);
 }
 
-static void cell_defer_no_holder_no_free(struct thin_c *tc,
-					 struct dm_bio_prison_cell *cell)
-{
-	struct pool *pool = tc->pool;
-	unsigned long flags;
-
-	spin_lock_irqsave(&tc->lock, flags);
-	dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
-	spin_unlock_irqrestore(&tc->lock, flags);
-
-	wake_worker(pool);
-}
-
 static void cell_error_with_code(struct pool *pool,
 				 struct dm_bio_prison_cell *cell, int error_code)
 {
@@ -371,6 +363,16 @@ static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
 	cell_error_with_code(pool, cell, -EIO);
 }
 
+static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
+{
+	cell_error_with_code(pool, cell, 0);
+}
+
+static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
+{
+	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
+}
+
 /*----------------------------------------------------------------*/
 
 /*
@@ -458,10 +460,28 @@ static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 		bio_endio(bio, DM_ENDIO_REQUEUE);
 }
 
+static void requeue_deferred_cells(struct thin_c *tc)
+{
+	struct pool *pool = tc->pool;
+	unsigned long flags;
+	struct list_head cells;
+	struct dm_bio_prison_cell *cell, *tmp;
+
+	INIT_LIST_HEAD(&cells);
+
+	spin_lock_irqsave(&tc->lock, flags);
+	list_splice_init(&tc->deferred_cells, &cells);
+	spin_unlock_irqrestore(&tc->lock, flags);
+
+	list_for_each_entry_safe(cell, tmp, &cells, user_list)
+		cell_requeue(pool, cell);
+}
+
 static void requeue_io(struct thin_c *tc)
 {
 	requeue_bio_list(tc, &tc->deferred_bio_list);
 	requeue_bio_list(tc, &tc->retry_on_resume_list);
+	requeue_deferred_cells(tc);
 }
 
 static void error_thin_retry_list(struct thin_c *tc)
@@ -706,6 +726,28 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
 	wake_worker(pool);
 }
 
+static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
+
+static void inc_remap_and_issue_cell(struct thin_c *tc,
+				     struct dm_bio_prison_cell *cell,
+				     dm_block_t block)
+{
+	struct bio *bio;
+	struct bio_list bios;
+
+	bio_list_init(&bios);
+	cell_release_no_holder(tc->pool, cell, &bios);
+
+	while ((bio = bio_list_pop(&bios))) {
+		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
+			thin_defer_bio(tc, bio);
+		else {
+			inc_all_io_entry(tc->pool, bio);
+			remap_and_issue(tc, bio, block);
+		}
+	}
+}
+
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
 	if (m->bio) {
@@ -1193,19 +1235,21 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 		retry_on_resume(bio);
 }
 
-static void process_discard(struct thin_c *tc, struct bio *bio)
+static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
 	int r;
+	struct bio *bio = cell->holder;
 	struct pool *pool = tc->pool;
-	struct dm_bio_prison_cell *cell, *cell2;
-	struct dm_cell_key key, key2;
+	struct dm_bio_prison_cell *cell2;
+	struct dm_cell_key key2;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_lookup_result lookup_result;
 	struct dm_thin_new_mapping *m;
 
-	build_virtual_key(tc->td, block, &key);
-	if (bio_detain(tc->pool, &key, bio, &cell))
+	if (tc->requeue_mode) {
+		cell_requeue(pool, cell);
 		return;
+	}
 
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
 	switch (r) {
@@ -1273,6 +1317,19 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 	}
 }
 
+static void process_discard_bio(struct thin_c *tc, struct bio *bio)
+{
+	struct dm_bio_prison_cell *cell;
+	struct dm_cell_key key;
+	dm_block_t block = get_bio_block(tc, bio);
+
+	build_virtual_key(tc->td, block, &key);
+	if (bio_detain(tc->pool, &key, bio, &cell))
+		return;
+
+	process_discard_cell(tc, cell);
+}
+
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 			  struct dm_cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result,
@@ -1379,34 +1436,30 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	}
 }
 
-static void process_bio(struct thin_c *tc, struct bio *bio)
+static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
 	int r;
 	struct pool *pool = tc->pool;
+	struct bio *bio = cell->holder;
 	dm_block_t block = get_bio_block(tc, bio);
-	struct dm_bio_prison_cell *cell;
-	struct dm_cell_key key;
 	struct dm_thin_lookup_result lookup_result;
 
-	/*
-	 * If cell is already occupied, then the block is already
-	 * being provisioned so we have nothing further to do here.
-	 */
-	build_virtual_key(tc->td, block, &key);
-	if (bio_detain(pool, &key, bio, &cell))
+	if (tc->requeue_mode) {
+		cell_requeue(pool, cell);
 		return;
+	}
 
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
 	switch (r) {
 	case 0:
 		if (lookup_result.shared) {
 			process_shared_bio(tc, bio, block, &lookup_result);
+			// FIXME: we can't remap because we're waiting on a commit
 			cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
 		} else {
 			inc_all_io_entry(pool, bio);
-			cell_defer_no_holder(tc, cell);
-
 			remap_and_issue(tc, bio, lookup_result.block);
+			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
 		}
 		break;
 
@@ -1440,7 +1493,26 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 	}
 }
 
-static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
+static void process_bio(struct thin_c *tc, struct bio *bio)
+{
+	struct pool *pool = tc->pool;
+	dm_block_t block = get_bio_block(tc, bio);
+	struct dm_bio_prison_cell *cell;
+	struct dm_cell_key key;
+
+	/*
+	 * If cell is already occupied, then the block is already
+	 * being provisioned so we have nothing further to do here.
+	 */
+	build_virtual_key(tc->td, block, &key);
+	if (bio_detain(pool, &key, bio, &cell))
+		return;
+
+	process_cell(tc, cell);
+}
+
+static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
+				    struct dm_bio_prison_cell *cell)
 {
 	int r;
 	int rw = bio_data_dir(bio);
@@ -1450,15 +1522,21 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
 	switch (r) {
 	case 0:
-		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
+		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
 			handle_unserviceable_bio(tc->pool, bio);
-		else {
+			if (cell)
+				cell_defer_no_holder(tc, cell);
+		} else {
 			inc_all_io_entry(tc->pool, bio);
 			remap_and_issue(tc, bio, lookup_result.block);
+			if (cell)
+				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
 		}
 		break;
 
 	case -ENODATA:
+		if (cell)
+			cell_defer_no_holder(tc, cell);
 		if (rw != READ) {
 			handle_unserviceable_bio(tc->pool, bio);
 			break;
@@ -1477,11 +1555,23 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 	default:
 		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
 			    __func__, r);
+		if (cell)
+			cell_defer_no_holder(tc, cell);
 		bio_io_error(bio);
 		break;
 	}
 }
 
+static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
+{
+	__process_bio_read_only(tc, bio, NULL);
+}
+
+static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+{
+	__process_bio_read_only(tc, cell->holder, cell);
+}
+
 static void process_bio_success(struct thin_c *tc, struct bio *bio)
 {
 	bio_endio(bio, 0);
@@ -1492,6 +1582,16 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio)
 	bio_io_error(bio);
 }
 
+static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+{
+	cell_success(tc->pool, cell);
+}
+
+static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+{
+	cell_error(tc->pool, cell);
+}
+
 /*
  * FIXME: should we also commit due to size of transaction, measured in
  * metadata blocks?
@@ -1624,6 +1724,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
 	blk_finish_plug(&plug);
 }
 
+static void process_thin_deferred_cells(struct thin_c *tc)
+{
+	struct pool *pool = tc->pool;
+	unsigned long flags;
+	struct list_head cells;
+	struct dm_bio_prison_cell *cell, *tmp;
+
+	INIT_LIST_HEAD(&cells);
+
+	spin_lock_irqsave(&tc->lock, flags);
+	list_splice_init(&tc->deferred_cells, &cells);
+	spin_unlock_irqrestore(&tc->lock, flags);
+
+	if (list_empty(&cells))
+		return;
+
+	list_for_each_entry_safe(cell, tmp, &cells, user_list) {
+		BUG_ON(!cell->holder);
+
+		/*
+		 * If we've got no free new_mapping structs, and processing
+		 * this bio might require one, we pause until there are some
+		 * prepared mappings to process.
+		 */
+		if (ensure_next_mapping(pool)) {
+			spin_lock_irqsave(&tc->lock, flags);
+			list_add(&cell->user_list, &tc->deferred_cells);
+			list_splice(&cells, &tc->deferred_cells);
+			spin_unlock_irqrestore(&tc->lock, flags);
+			break;
+		}
+
+		if (cell->holder->bi_rw & REQ_DISCARD)
+			pool->process_discard_cell(tc, cell);
+		else
+			pool->process_cell(tc, cell);
+	}
+}
+
 static void thin_get(struct thin_c *tc);
 static void thin_put(struct thin_c *tc);
 
@@ -1672,6 +1811,7 @@ static void process_deferred_bios(struct pool *pool)
 
 	tc = get_first_thin(pool);
 	while (tc) {
+		process_thin_deferred_cells(tc);
 		process_thin_deferred_bios(tc);
 		tc = get_next_thin(pool, tc);
 	}
@@ -1850,6 +1990,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		dm_pool_metadata_read_only(pool->pmd);
 		pool->process_bio = process_bio_fail;
 		pool->process_discard = process_bio_fail;
+		pool->process_cell = process_cell_fail;
+		pool->process_discard_cell = process_cell_fail;
 		pool->process_prepared_mapping = process_prepared_mapping_fail;
 		pool->process_prepared_discard = process_prepared_discard_fail;
 
@@ -1862,6 +2004,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		dm_pool_metadata_read_only(pool->pmd);
 		pool->process_bio = process_bio_read_only;
 		pool->process_discard = process_bio_success;
+		pool->process_cell = process_cell_read_only;
+		pool->process_discard_cell = process_cell_success;
 		pool->process_prepared_mapping = process_prepared_mapping_fail;
 		pool->process_prepared_discard = process_prepared_discard_passdown;
 
@@ -1880,7 +2024,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		if (old_mode != new_mode)
 			notify_of_pool_mode_change(pool, "out-of-data-space");
 		pool->process_bio = process_bio_read_only;
-		pool->process_discard = process_discard;
+		pool->process_discard = process_discard_bio;
+		pool->process_cell = process_cell_read_only;
+		pool->process_discard_cell = process_discard_cell;
 		pool->process_prepared_mapping = process_prepared_mapping;
 		pool->process_prepared_discard = process_prepared_discard_passdown;
 
@@ -1893,7 +2039,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		notify_of_pool_mode_change(pool, "write");
 		dm_pool_metadata_read_write(pool->pmd);
 		pool->process_bio = process_bio;
-		pool->process_discard = process_discard;
+		pool->process_discard = process_discard_bio;
+		pool->process_cell = process_cell;
+		pool->process_discard_cell = process_discard_cell;
 		pool->process_prepared_mapping = process_prepared_mapping;
 		pool->process_prepared_discard = process_prepared_discard;
 		break;
@@ -1962,6 +2110,20 @@ static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
 	throttle_unlock(&pool->throttle);
 }
 
+static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+{
+	unsigned long flags;
+	struct pool *pool = tc->pool;
+
+	throttle_lock(&pool->throttle);
+	spin_lock_irqsave(&tc->lock, flags);
+	list_add_tail(&cell->user_list, &tc->deferred_cells);
+	spin_unlock_irqrestore(&tc->lock, flags);
+	throttle_unlock(&pool->throttle);
+
+	wake_worker(pool);
+}
+
 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1982,8 +2144,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_device *td = tc->td;
 	struct dm_thin_lookup_result result;
-	struct dm_bio_prison_cell cell1, cell2;
-	struct dm_bio_prison_cell *cell_result;
+	struct dm_bio_prison_cell *virt_cell, *data_cell;
 	struct dm_cell_key key;
 
 	thin_hook_bio(tc, bio);
@@ -2008,7 +2169,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 	 * there's a race with discard.
 	 */
 	build_virtual_key(tc->td, block, &key);
-	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
+	if (bio_detain(tc->pool, &key, bio, &virt_cell))
 		return DM_MAPIO_SUBMITTED;
 
 	r = dm_thin_find_block(td, block, 0, &result);
@@ -2033,20 +2194,19 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		 * More distant ancestors are irrelevant. The
 		 * shared flag will be set in their case.
 		 */
-		thin_defer_bio(tc, bio);
-		cell_defer_no_holder_no_free(tc, &cell1);
+		thin_defer_cell(tc, virt_cell);
 		return DM_MAPIO_SUBMITTED;
 	}
 
 	build_data_key(tc->td, result.block, &key);
-	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
-		cell_defer_no_holder_no_free(tc, &cell1);
+	if (bio_detain(tc->pool, &key, bio, &data_cell)) {
+		cell_defer_no_holder(tc, virt_cell);
 		return DM_MAPIO_SUBMITTED;
 	}
 
 	inc_all_io_entry(tc->pool, bio);
-	cell_defer_no_holder_no_free(tc, &cell2);
-	cell_defer_no_holder_no_free(tc, &cell1);
+	cell_defer_no_holder(tc, data_cell);
+	cell_defer_no_holder(tc, virt_cell);
 
 	remap(tc, bio, result.block);
 	return DM_MAPIO_REMAPPED;
@@ -2058,14 +2218,13 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 			 * of doing so.
 			 */
 			handle_unserviceable_bio(tc->pool, bio);
-			cell_defer_no_holder_no_free(tc, &cell1);
+			cell_defer_no_holder(tc, virt_cell);
 			return DM_MAPIO_SUBMITTED;
 		}
 		/* fall through */
 
 	case -EWOULDBLOCK:
-		thin_defer_bio(tc, bio);
-		cell_defer_no_holder_no_free(tc, &cell1);
+		thin_defer_cell(tc, virt_cell);
 		return DM_MAPIO_SUBMITTED;
 
 	default:
@@ -2075,7 +2234,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		 * pool is switched to fail-io mode.
 		 */
 		bio_io_error(bio);
-		cell_defer_no_holder_no_free(tc, &cell1);
+		cell_defer_no_holder(tc, virt_cell);
 		return DM_MAPIO_SUBMITTED;
 	}
 }
@@ -3394,6 +3553,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto out_unlock;
 	}
 	spin_lock_init(&tc->lock);
+	INIT_LIST_HEAD(&tc->deferred_cells);
 	bio_list_init(&tc->deferred_bio_list);
 	bio_list_init(&tc->retry_on_resume_list);
 	tc->sort_bio_list = RB_ROOT;