aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorJoe Thornber <ejt@redhat.com>2014-11-24 09:06:22 -0500
committerMike Snitzer <snitzer@redhat.com>2014-12-01 11:30:10 -0500
commit3e2e1c3098fcc02369f0eea822d0a7914b691567 (patch)
treed7ccfeacc50f7e3b730daad45f3ad11c70570153 /drivers/md
parent2572629a1318eb9e13e70fa59756d7bcfb80319e (diff)
dm cache: when reloading a discard bitset allow for a different discard block size
The discard block size can change if the origin changes size or if an old DM cache is upgraded from using a discard block size that was equal to cache block size. To fix this an extent of discarded blocks is established for the purpose of translating the old discard block size to the new in-core discard block size and set bits. The old (potentially huge) discard bitset is left on-disk until it is re-written using the new in-core information on the next successful DM cache shutdown. Fixes: 7ae34e777896 ("dm cache: improve discard support") Signed-off-by: Joe Thornber <ejt@redhat.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/dm-cache-target.c94
1 files changed, 87 insertions, 7 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 41e7cfdb450d..2c66315553f2 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2817,17 +2817,86 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2817 return 0; 2817 return 0;
2818} 2818}
2819 2819
2820/*
2821 * The discard block size in the on disk metadata is not
2822 * neccessarily the same as we're currently using. So we have to
2823 * be careful to only set the discarded attribute if we know it
2824 * covers a complete block of the new size.
2825 */
2826struct discard_load_info {
2827 struct cache *cache;
2828
2829 /*
2830 * These blocks are sized using the on disk dblock size, rather
2831 * than the current one.
2832 */
2833 dm_block_t block_size;
2834 dm_block_t discard_begin, discard_end;
2835};
2836
2837static void discard_load_info_init(struct cache *cache,
2838 struct discard_load_info *li)
2839{
2840 li->cache = cache;
2841 li->discard_begin = li->discard_end = 0;
2842}
2843
2844static void set_discard_range(struct discard_load_info *li)
2845{
2846 sector_t b, e;
2847
2848 if (li->discard_begin == li->discard_end)
2849 return;
2850
2851 /*
2852 * Convert to sectors.
2853 */
2854 b = li->discard_begin * li->block_size;
2855 e = li->discard_end * li->block_size;
2856
2857 /*
2858 * Then convert back to the current dblock size.
2859 */
2860 b = dm_sector_div_up(b, li->cache->discard_block_size);
2861 sector_div(e, li->cache->discard_block_size);
2862
2863 /*
2864 * The origin may have shrunk, so we need to check we're still in
2865 * bounds.
2866 */
2867 if (e > from_dblock(li->cache->discard_nr_blocks))
2868 e = from_dblock(li->cache->discard_nr_blocks);
2869
2870 for (; b < e; b++)
2871 set_discard(li->cache, to_dblock(b));
2872}
2873
2820static int load_discard(void *context, sector_t discard_block_size, 2874static int load_discard(void *context, sector_t discard_block_size,
2821 dm_dblock_t dblock, bool discard) 2875 dm_dblock_t dblock, bool discard)
2822{ 2876{
2823 struct cache *cache = context; 2877 struct discard_load_info *li = context;
2824 2878
2825 /* FIXME: handle mis-matched block size */ 2879 li->block_size = discard_block_size;
2826 2880
2827 if (discard) 2881 if (discard) {
2828 set_discard(cache, dblock); 2882 if (from_dblock(dblock) == li->discard_end)
2829 else 2883 /*
2830 clear_discard(cache, dblock); 2884 * We're already in a discard range, just extend it.
2885 */
2886 li->discard_end = li->discard_end + 1ULL;
2887
2888 else {
2889 /*
2890 * Emit the old range and start a new one.
2891 */
2892 set_discard_range(li);
2893 li->discard_begin = from_dblock(dblock);
2894 li->discard_end = li->discard_begin + 1ULL;
2895 }
2896 } else {
2897 set_discard_range(li);
2898 li->discard_begin = li->discard_end = 0;
2899 }
2831 2900
2832 return 0; 2901 return 0;
2833} 2902}
@@ -2911,11 +2980,22 @@ static int cache_preresume(struct dm_target *ti)
2911 } 2980 }
2912 2981
2913 if (!cache->loaded_discards) { 2982 if (!cache->loaded_discards) {
2914 r = dm_cache_load_discards(cache->cmd, load_discard, cache); 2983 struct discard_load_info li;
2984
2985 /*
2986 * The discard bitset could have been resized, or the
2987 * discard block size changed. To be safe we start by
2988 * setting every dblock to not discarded.
2989 */
2990 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2991
2992 discard_load_info_init(cache, &li);
2993 r = dm_cache_load_discards(cache->cmd, load_discard, &li);
2915 if (r) { 2994 if (r) {
2916 DMERR("could not load origin discards"); 2995 DMERR("could not load origin discards");
2917 return r; 2996 return r;
2918 } 2997 }
2998 set_discard_range(&li);
2919 2999
2920 cache->loaded_discards = true; 3000 cache->loaded_discards = true;
2921 } 3001 }