path: root/drivers/md/dm-cache-target.c
author    Joe Thornber <ejt@redhat.com>  2015-05-15 10:26:08 -0400
committer Mike Snitzer <snitzer@redhat.com>  2015-05-29 14:19:06 -0400
commit    651f5fa2a3959ff5db60c09a84efd66309fe4035
tree      668234af9786cfef84acd39fdcc4691a5fec5ab7  /drivers/md/dm-cache-target.c
parent    3cdf93f9d85979b22b6abfd4ab19350860e4dfac
dm cache: defer whole cells
Currently, individual bios are deferred to the worker thread if they cannot be processed immediately (e.g. a block is in the process of being moved to the fast device).

This patch passes whole cells across to the worker instead. This saves reacquiring the cell, and also collects the bios destined for the same block together, allowing them to be mapped with a single policy lookup. This reduces the overhead of using dm-cache.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--   drivers/md/dm-cache-target.c | 325
1 file changed, 262 insertions(+), 63 deletions(-)
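Before the patch itself, here is a rough, self-contained illustration of the idea in userspace C (every name below is a hypothetical stand-in, not the dm-cache or bio-prison API): a deferred cell carries all of the bios queued against one origin block, so the worker performs a single policy lookup per cell and remaps every bio it holds, instead of re-detaining and looking up each bio individually.

/*
 * Simplified sketch only: models "defer the whole cell" with plain
 * linked lists.  The real code uses struct dm_bio_prison_cell,
 * cache->deferred_cells and the dm-cache policy, none of which appear here.
 */
#include <stdio.h>
#include <stdlib.h>

struct bio {                 /* stand-in for struct bio */
	int sector;
	struct bio *next;
};

struct cell {                /* stand-in for struct dm_bio_prison_cell */
	int oblock;          /* origin block all these bios target */
	struct bio *bios;    /* bios queued against that block */
	struct cell *next;   /* link on the deferred-cells list */
};

static struct cell *deferred_cells;

/* Queue a whole cell for the worker, rather than each bio separately. */
static void defer_cell(struct cell *cell)
{
	cell->next = deferred_cells;
	deferred_cells = cell;
}

/* One "policy lookup" per block, shared by every bio in the cell. */
static int policy_lookup(int oblock)
{
	printf("policy lookup for origin block %d\n", oblock);
	return oblock * 8;   /* pretend this is the cache block */
}

/* Worker: a single lookup per deferred cell, then remap all of its bios. */
static void process_deferred_cells(void)
{
	struct cell *cell;

	while ((cell = deferred_cells)) {
		int cblock = policy_lookup(cell->oblock);
		struct bio *bio;

		deferred_cells = cell->next;
		for (bio = cell->bios; bio; bio = bio->next)
			printf("  remap bio at sector %d -> cache block %d\n",
			       bio->sector, cblock);
		free(cell);
	}
}

int main(void)
{
	struct bio b1 = { .sector = 0, .next = NULL };
	struct bio b2 = { .sector = 8, .next = &b1 };
	struct cell *c = malloc(sizeof(*c));

	c->oblock = 1;
	c->bios = &b2;
	defer_cell(c);
	process_deferred_cells();
	return 0;
}

In the real target the deferred cell also keeps the origin block locked while it sits on the deferred list, which is what removes the need to reacquire it in the worker.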
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5d3b20b91ba3..d2d91c164420 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -257,6 +257,7 @@ struct cache {
 	int sectors_per_block_shift;
 
 	spinlock_t lock;
+	struct list_head deferred_cells;
 	struct bio_list deferred_bios;
 	struct bio_list deferred_flush_bios;
 	struct bio_list deferred_writethrough_bios;
@@ -969,26 +970,63 @@ static void dec_io_migrations(struct cache *cache)
 	atomic_dec(&cache->nr_io_migrations);
 }
 
-static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
-			 bool holder)
+static void __cell_release(struct cache *cache, struct dm_bio_prison_cell *cell,
+			   bool holder, struct bio_list *bios)
 {
 	(holder ? dm_cell_release : dm_cell_release_no_holder)
-		(cache->prison, cell, &cache->deferred_bios);
+		(cache->prison, cell, bios);
 	free_prison_cell(cache, cell);
 }
 
-static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
-		       bool holder)
+static bool discard_or_flush(struct bio *bio)
+{
+	return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
+}
+
+static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
+{
+	if (discard_or_flush(cell->holder))
+		/*
+		 * We have to handle these bios
+		 * individually.
+		 */
+		__cell_release(cache, cell, true, &cache->deferred_bios);
+
+	else
+		list_add_tail(&cell->user_list, &cache->deferred_cells);
+}
+
+static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder)
 {
 	unsigned long flags;
 
+	if (!holder && dm_cell_promote_or_release(cache->prison, cell)) {
+		/*
+		 * There was no prisoner to promote to holder, the
+		 * cell has been released.
+		 */
+		free_prison_cell(cache, cell);
+		return;
+	}
+
 	spin_lock_irqsave(&cache->lock, flags);
-	__cell_defer(cache, cell, holder);
+	__cell_defer(cache, cell);
 	spin_unlock_irqrestore(&cache->lock, flags);
 
 	wake_worker(cache);
 }
 
+static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
+{
+	dm_cell_error(cache->prison, cell, err);
+	dm_bio_prison_free_cell(cache->prison, cell);
+}
+
+static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
+{
+	cell_error_with_code(cache, cell, DM_ENDIO_REQUEUE);
+}
+
 static void free_io_migration(struct dm_cache_migration *mg)
 {
 	dec_io_migrations(mg->cache);
@@ -1525,6 +1563,107 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
 
 /*----------------------------------------------------------------*/
 
+struct inc_detail {
+	struct cache *cache;
+	struct bio_list bios_for_issue;
+	struct bio_list unhandled_bios;
+	bool any_writes;
+};
+
+static void inc_fn(void *context, struct dm_bio_prison_cell *cell)
+{
+	struct bio *bio;
+	struct inc_detail *detail = context;
+	struct cache *cache = detail->cache;
+
+	inc_ds(cache, cell->holder, cell);
+	if (bio_data_dir(cell->holder) == WRITE)
+		detail->any_writes = true;
+
+	while ((bio = bio_list_pop(&cell->bios))) {
+		if (discard_or_flush(bio)) {
+			bio_list_add(&detail->unhandled_bios, bio);
+			continue;
+		}
+
+		if (bio_data_dir(bio) == WRITE)
+			detail->any_writes = true;
+
+		bio_list_add(&detail->bios_for_issue, bio);
+		inc_ds(cache, bio, cell);
+	}
+}
+
+// FIXME: refactor these two
+static void remap_cell_to_origin_clear_discard(struct cache *cache,
+					       struct dm_bio_prison_cell *cell,
+					       dm_oblock_t oblock, bool issue_holder)
+{
+	struct bio *bio;
+	unsigned long flags;
+	struct inc_detail detail;
+
+	detail.cache = cache;
+	bio_list_init(&detail.bios_for_issue);
+	bio_list_init(&detail.unhandled_bios);
+	detail.any_writes = false;
+
+	spin_lock_irqsave(&cache->lock, flags);
+	dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
+	bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	remap_to_origin(cache, cell->holder);
+	if (issue_holder)
+		issue(cache, cell->holder);
+	else
+		accounted_begin(cache, cell->holder);
+
+	if (detail.any_writes)
+		clear_discard(cache, oblock_to_dblock(cache, oblock));
+
+	while ((bio = bio_list_pop(&detail.bios_for_issue))) {
+		remap_to_origin(cache, bio);
+		issue(cache, bio);
+	}
+}
+
+static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
+				      dm_oblock_t oblock, dm_cblock_t cblock, bool issue_holder)
+{
+	struct bio *bio;
+	unsigned long flags;
+	struct inc_detail detail;
+
+	detail.cache = cache;
+	bio_list_init(&detail.bios_for_issue);
+	bio_list_init(&detail.unhandled_bios);
+	detail.any_writes = false;
+
+	spin_lock_irqsave(&cache->lock, flags);
+	dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
+	bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	remap_to_cache(cache, cell->holder, cblock);
+	if (issue_holder)
+		issue(cache, cell->holder);
+	else
+		accounted_begin(cache, cell->holder);
+
+	if (detail.any_writes) {
+		set_dirty(cache, oblock, cblock);
+		clear_discard(cache, oblock_to_dblock(cache, oblock));
+	}
+
+	while ((bio = bio_list_pop(&detail.bios_for_issue))) {
+		remap_to_cache(cache, bio, cblock);
+		issue(cache, bio);
+	}
+}
+
+/*----------------------------------------------------------------*/
+
 struct old_oblock_lock {
 	struct policy_locker locker;
 	struct cache *cache;
@@ -1549,28 +1688,18 @@ static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
 			     l->structs, &l->cell);
 }
 
-static void process_bio(struct cache *cache, struct prealloc *structs,
-			struct bio *bio)
+static void process_cell(struct cache *cache, struct prealloc *structs,
+			 struct dm_bio_prison_cell *new_ocell)
 {
 	int r;
 	bool release_cell = true;
+	struct bio *bio = new_ocell->holder;
 	dm_oblock_t block = get_bio_block(cache, bio);
-	struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
 	struct policy_result lookup_result;
 	bool passthrough = passthrough_mode(&cache->features);
 	bool discarded_block, can_migrate;
 	struct old_oblock_lock ool;
 
-	/*
-	 * Check to see if that block is currently migrating.
-	 */
-	cell_prealloc = prealloc_get_cell(structs);
-	r = bio_detain(cache, block, bio, cell_prealloc,
-		       (cell_free_fn) prealloc_put_cell,
-		       structs, &new_ocell);
-	if (r > 0)
-		return;
-
 	discarded_block = is_discarded_oblock(cache, block);
 	can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
 
@@ -1615,9 +1744,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
 				inc_and_issue(cache, bio, new_ocell);
 
 			} else {
-				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
-				inc_and_issue(cache, bio, new_ocell);
+				remap_cell_to_cache_dirty(cache, new_ocell, block, lookup_result.cblock, true);
+				release_cell = false;
 			}
 		}
 
@@ -1625,8 +1754,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 
 	case POLICY_MISS:
 		inc_miss_counter(cache, bio);
-		remap_to_origin_clear_discard(cache, bio, block);
-		inc_and_issue(cache, bio, new_ocell);
+		remap_cell_to_origin_clear_discard(cache, new_ocell, block, true);
+		release_cell = false;
 		break;
 
 	case POLICY_NEW:
@@ -1654,10 +1783,30 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 		cell_defer(cache, new_ocell, false);
 }
 
+static void process_bio(struct cache *cache, struct prealloc *structs,
+			struct bio *bio)
+{
+	int r;
+	dm_oblock_t block = get_bio_block(cache, bio);
+	struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
+
+	/*
+	 * Check to see if that block is currently migrating.
+	 */
+	cell_prealloc = prealloc_get_cell(structs);
+	r = bio_detain(cache, block, bio, cell_prealloc,
+		       (cell_free_fn) prealloc_put_cell,
+		       structs, &new_ocell);
+	if (r > 0)
+		return;
+
+	process_cell(cache, structs, new_ocell);
+}
+
 static int need_commit_due_to_time(struct cache *cache)
 {
-	return !time_in_range(jiffies, cache->last_commit_jiffies,
-			      cache->last_commit_jiffies + COMMIT_PERIOD);
+	return jiffies < cache->last_commit_jiffies ||
+	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
 }
 
 static int commit_if_needed(struct cache *cache)
@@ -1716,6 +1865,40 @@ static void process_deferred_bios(struct cache *cache)
 	prealloc_free_structs(cache, &structs);
 }
 
+static void process_deferred_cells(struct cache *cache)
+{
+	unsigned long flags;
+	struct dm_bio_prison_cell *cell, *tmp;
+	struct list_head cells;
+	struct prealloc structs;
+
+	memset(&structs, 0, sizeof(structs));
+
+	INIT_LIST_HEAD(&cells);
+
+	spin_lock_irqsave(&cache->lock, flags);
+	list_splice_init(&cache->deferred_cells, &cells);
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	list_for_each_entry_safe(cell, tmp, &cells, user_list) {
+		/*
+		 * If we've got no free migration structs, and processing
+		 * this bio might require one, we pause until there are some
+		 * prepared mappings to process.
+		 */
+		if (prealloc_data_structs(cache, &structs)) {
+			spin_lock_irqsave(&cache->lock, flags);
+			list_splice(&cells, &cache->deferred_cells);
+			spin_unlock_irqrestore(&cache->lock, flags);
+			break;
+		}
+
+		process_cell(cache, &structs, cell);
+	}
+
+	prealloc_free_structs(cache, &structs);
+}
+
 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
 {
 	unsigned long flags;
@@ -1883,7 +2066,22 @@ static void stop_worker(struct cache *cache)
 	flush_workqueue(cache->wq);
 }
 
-static void requeue_deferred_io(struct cache *cache)
+static void requeue_deferred_cells(struct cache *cache)
+{
+	unsigned long flags;
+	struct list_head cells;
+	struct dm_bio_prison_cell *cell, *tmp;
+
+	INIT_LIST_HEAD(&cells);
+	spin_lock_irqsave(&cache->lock, flags);
+	list_splice_init(&cache->deferred_cells, &cells);
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	list_for_each_entry_safe(cell, tmp, &cells, user_list)
+		cell_requeue(cache, cell);
+}
+
+static void requeue_deferred_bios(struct cache *cache)
 {
 	struct bio *bio;
 	struct bio_list bios;
@@ -1904,6 +2102,7 @@ static int more_work(struct cache *cache)
 			!list_empty(&cache->need_commit_migrations);
 	else
 		return !bio_list_empty(&cache->deferred_bios) ||
+			!list_empty(&cache->deferred_cells) ||
 			!bio_list_empty(&cache->deferred_flush_bios) ||
 			!bio_list_empty(&cache->deferred_writethrough_bios) ||
 			!list_empty(&cache->quiesced_migrations) ||
@@ -1921,6 +2120,7 @@ static void do_worker(struct work_struct *ws)
 			writeback_some_dirty_blocks(cache);
 			process_deferred_writethrough_bios(cache);
 			process_deferred_bios(cache);
+			process_deferred_cells(cache);
 			process_invalidation_requests(cache);
 		}
 
@@ -1935,6 +2135,7 @@ static void do_worker(struct work_struct *ws)
 			 * FIXME: rollback metadata or just go into a
 			 * failure mode and error everything
 			 */
+
 		} else {
 			process_deferred_flush_bios(cache, true);
 			process_migrations(cache, &cache->need_commit_migrations,
@@ -2525,6 +2726,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	}
 
 	spin_lock_init(&cache->lock);
+	INIT_LIST_HEAD(&cache->deferred_cells);
 	bio_list_init(&cache->deferred_bios);
 	bio_list_init(&cache->deferred_flush_bios);
 	bio_list_init(&cache->deferred_writethrough_bios);
@@ -2682,9 +2884,14 @@ out:
 	return r;
 }
 
-static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell)
+/*----------------------------------------------------------------*/
+
+static int cache_map(struct dm_target *ti, struct bio *bio)
 {
+	struct cache *cache = ti->private;
+
 	int r;
+	struct dm_bio_prison_cell *cell = NULL;
 	dm_oblock_t block = get_bio_block(cache, bio);
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	bool can_migrate = false;
@@ -2702,10 +2909,11 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
 		 * Just remap to the origin and carry on.
 		 */
 		remap_to_origin(cache, bio);
+		accounted_begin(cache, bio);
 		return DM_MAPIO_REMAPPED;
 	}
 
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+	if (discard_or_flush(bio)) {
 		defer_bio(cache, bio);
 		return DM_MAPIO_SUBMITTED;
 	}
@@ -2713,15 +2921,15 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
 	/*
 	 * Check to see if that block is currently migrating.
 	 */
-	*cell = alloc_prison_cell(cache);
-	if (!*cell) {
+	cell = alloc_prison_cell(cache);
+	if (!cell) {
 		defer_bio(cache, bio);
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	r = bio_detain(cache, block, bio, *cell,
+	r = bio_detain(cache, block, bio, cell,
 		       (cell_free_fn) free_prison_cell,
-		       cache, cell);
+		       cache, &cell);
 	if (r) {
 		if (r < 0)
 			defer_bio(cache, bio);
@@ -2734,12 +2942,12 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
 	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
 		       bio, &ool.locker, &lookup_result);
 	if (r == -EWOULDBLOCK) {
-		cell_defer(cache, *cell, true);
+		cell_defer(cache, cell, true);
 		return DM_MAPIO_SUBMITTED;
 
 	} else if (r) {
 		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
-		cell_defer(cache, *cell, false);
+		cell_defer(cache, cell, false);
 		bio_io_error(bio);
 		return DM_MAPIO_SUBMITTED;
 	}
@@ -2753,21 +2961,30 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
 				 * We need to invalidate this block, so
 				 * defer for the worker thread.
 				 */
-				cell_defer(cache, *cell, true);
+				cell_defer(cache, cell, true);
 				r = DM_MAPIO_SUBMITTED;
 
 			} else {
 				inc_miss_counter(cache, bio);
 				remap_to_origin_clear_discard(cache, bio, block);
+				accounted_begin(cache, bio);
+				inc_ds(cache, bio, cell);
+				// FIXME: we want to remap hits or misses straight
+				// away rather than passing over to the worker.
+				cell_defer(cache, cell, false);
 			}
 
 		} else {
 			inc_hit_counter(cache, bio);
 			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
-			    !is_dirty(cache, lookup_result.cblock))
+			    !is_dirty(cache, lookup_result.cblock)) {
 				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
-			else
-				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+				accounted_begin(cache, bio);
+				inc_ds(cache, bio, cell);
+				cell_defer(cache, cell, false);
+
+			} else
+				remap_cell_to_cache_dirty(cache, cell, block, lookup_result.cblock, false);
 		}
 		break;
 
@@ -2779,18 +2996,18 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
 			 * longer needed because the block has been demoted.
 			 */
 			bio_endio(bio, 0);
-			cell_defer(cache, *cell, false);
+			// FIXME: remap everything as a miss
+			cell_defer(cache, cell, false);
 			r = DM_MAPIO_SUBMITTED;
 
 		} else
-			remap_to_origin_clear_discard(cache, bio, block);
-
+			remap_cell_to_origin_clear_discard(cache, cell, block, false);
 		break;
 
 	default:
 		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
 			    (unsigned) lookup_result.op);
-		cell_defer(cache, *cell, false);
+		cell_defer(cache, cell, false);
 		bio_io_error(bio);
 		r = DM_MAPIO_SUBMITTED;
 	}
@@ -2798,25 +3015,6 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
 	return r;
 }
 
-static int cache_map(struct dm_target *ti, struct bio *bio)
-{
-	int r;
-	struct dm_bio_prison_cell *cell = NULL;
-	struct cache *cache = ti->private;
-
-	r = __cache_map(cache, bio, &cell);
-	if (r == DM_MAPIO_REMAPPED) {
-		accounted_begin(cache, bio);
-
-		if (cell) {
-			inc_ds(cache, bio, cell);
-			cell_defer(cache, cell, false);
-		}
-	}
-
-	return r;
-}
-
 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
 	struct cache *cache = ti->private;
@@ -2913,7 +3111,8 @@ static void cache_postsuspend(struct dm_target *ti)
 	start_quiescing(cache);
 	wait_for_migrations(cache);
 	stop_worker(cache);
-	requeue_deferred_io(cache);
+	requeue_deferred_bios(cache);
+	requeue_deferred_cells(cache);
 	stop_quiescing(cache);
 
 	(void) sync_metadata(cache);