diff options
author | Mike Snitzer <snitzer@redhat.com> | 2014-03-20 10:11:15 -0400 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2014-03-27 16:56:23 -0400 |
commit | d132cc6d9e92424bb9d4fd35f5bd0e55d583f4be (patch) | |
tree | 9e436740996dd835cf2d9fff2c148cc48cf7b9e9 /drivers/md/dm-cache-target.c | |
parent | 428e4698642794444cdb26c148a827f22c28d546 (diff) |
dm cache: prevent corruption caused by discard_block_size > cache_block_size
If the discard block size is larger than the cache block size we will
not properly quiesce IO to a region that is about to be discarded. This
results in a race between a cache migration where no copy is needed, and
a write to an adjacent cache block that's within the same large discard
block.
Work around this by limiting the discard_block_size to cache_block_size.
Also limit the max_discard_sectors to cache_block_size.
A more comprehensive fix that introduces range locking support in the
bio_prison and proper quiescing of a discard range that spans multiple
cache blocks is already in development.
Reported-by: Morgan Mears <Morgan.Mears@netapp.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
Acked-by: Heinz Mauelshagen <heinzm@redhat.com>
Cc: stable@vger.kernel.org
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r-- | drivers/md/dm-cache-target.c | 37 |
1 file changed, 3 insertions, 34 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 074b9c8e4cf0..bccb7ae34e61 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -239,7 +239,7 @@ struct cache { | |||
239 | */ | 239 | */ |
240 | dm_dblock_t discard_nr_blocks; | 240 | dm_dblock_t discard_nr_blocks; |
241 | unsigned long *discard_bitset; | 241 | unsigned long *discard_bitset; |
242 | uint32_t discard_block_size; /* a power of 2 times sectors per block */ | 242 | uint32_t discard_block_size; |
243 | 243 | ||
244 | /* | 244 | /* |
245 | * Rather than reconstructing the table line for the status we just | 245 | * Rather than reconstructing the table line for the status we just |
@@ -2171,35 +2171,6 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, | |||
2171 | return 0; | 2171 | return 0; |
2172 | } | 2172 | } |
2173 | 2173 | ||
2174 | /* | ||
2175 | * We want the discard block size to be a power of two, at least the size | ||
2176 | * of the cache block size, and have no more than 2^14 discard blocks | ||
2177 | * across the origin. | ||
2178 | */ | ||
2179 | #define MAX_DISCARD_BLOCKS (1 << 14) | ||
2180 | |||
2181 | static bool too_many_discard_blocks(sector_t discard_block_size, | ||
2182 | sector_t origin_size) | ||
2183 | { | ||
2184 | (void) sector_div(origin_size, discard_block_size); | ||
2185 | |||
2186 | return origin_size > MAX_DISCARD_BLOCKS; | ||
2187 | } | ||
2188 | |||
2189 | static sector_t calculate_discard_block_size(sector_t cache_block_size, | ||
2190 | sector_t origin_size) | ||
2191 | { | ||
2192 | sector_t discard_block_size; | ||
2193 | |||
2194 | discard_block_size = roundup_pow_of_two(cache_block_size); | ||
2195 | |||
2196 | if (origin_size) | ||
2197 | while (too_many_discard_blocks(discard_block_size, origin_size)) | ||
2198 | discard_block_size *= 2; | ||
2199 | |||
2200 | return discard_block_size; | ||
2201 | } | ||
2202 | |||
2203 | #define DEFAULT_MIGRATION_THRESHOLD 2048 | 2174 | #define DEFAULT_MIGRATION_THRESHOLD 2048 |
2204 | 2175 | ||
2205 | static int cache_create(struct cache_args *ca, struct cache **result) | 2176 | static int cache_create(struct cache_args *ca, struct cache **result) |
@@ -2321,9 +2292,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
2321 | } | 2292 | } |
2322 | clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); | 2293 | clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); |
2323 | 2294 | ||
2324 | cache->discard_block_size = | 2295 | cache->discard_block_size = cache->sectors_per_block; |
2325 | calculate_discard_block_size(cache->sectors_per_block, | ||
2326 | cache->origin_sectors); | ||
2327 | cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); | 2296 | cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); |
2328 | cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); | 2297 | cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); |
2329 | if (!cache->discard_bitset) { | 2298 | if (!cache->discard_bitset) { |
@@ -3120,7 +3089,7 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) | |||
3120 | /* | 3089 | /* |
3121 | * FIXME: these limits may be incompatible with the cache device | 3090 | * FIXME: these limits may be incompatible with the cache device |
3122 | */ | 3091 | */ |
3123 | limits->max_discard_sectors = cache->discard_block_size * 1024; | 3092 | limits->max_discard_sectors = cache->discard_block_size; |
3124 | limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; | 3093 | limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; |
3125 | } | 3094 | } |
3126 | 3095 | ||