author	Heinz Mauelshagen <heinzm@redhat.com>	2014-03-27 15:14:10 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2014-03-27 16:56:23 -0400
commit	64ab346a360a4b15c28fb8531918d4a01f4eabd9 (patch)
tree	7b1902c2b9c5980b7a18ea8f7e7a4cc633911804 /drivers/md/dm-cache-target.c
parent	d132cc6d9e92424bb9d4fd35f5bd0e55d583f4be (diff)
dm cache: remove remainder of distinct discard block size
Discard block size not being equal to cache block size causes data
corruption by erroneously avoiding migrations in issue_copy() because
the discard state is being cleared for a group of cache blocks when it
should not.
Completely remove all code that enabled a distinction between the
cache block size and discard block size.
Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
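To illustrate the aliasing the message describes, below is a minimal user-space sketch (not kernel code; the 8:1 block ratio and the numbers are made up for illustration) showing how, when one discard bit covers several cache blocks, two different origin blocks land on the same bit, so clearing the discard state on behalf of one written block also rewrites the state that is_discarded_oblock()/issue_copy() consult for its untouched neighbour.

/*
 * Minimal standalone sketch (not kernel code) of the granularity hazard:
 * with a discard block larger than a cache block, one discard bit is
 * shared by several origin blocks.  Block counts are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define CACHE_BLOCKS_PER_DISCARD_BLOCK 8	/* hypothetical ratio > 1 */

/* old-style mapping: many origin blocks collapse onto one discard bit */
static uint64_t oblock_to_dblock(uint64_t oblock)
{
	return oblock / CACHE_BLOCKS_PER_DISCARD_BLOCK;
}

int main(void)
{
	uint64_t written = 3;	/* origin block a WRITE just remapped */
	uint64_t neighbour = 5;	/* untouched origin block nearby */

	/* both blocks map to discard bit 0 ... */
	printf("dblock(written)=%llu dblock(neighbour)=%llu\n",
	       (unsigned long long)oblock_to_dblock(written),
	       (unsigned long long)oblock_to_dblock(neighbour));

	/*
	 * ... so clearing (or setting) that one bit for the written block
	 * silently changes the discard state tracked for the neighbour.
	 * With discard block size == cache block size the mapping is 1:1
	 * and this aliasing cannot happen.
	 */
	return 0;
}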
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--	drivers/md/dm-cache-target.c	72
1 file changed, 26 insertions, 46 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index bccb7ae34e61..8534679918f4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -237,9 +237,8 @@ struct cache {
 	/*
 	 * origin_blocks entries, discarded if set.
 	 */
-	dm_dblock_t discard_nr_blocks;
+	dm_oblock_t discard_nr_blocks;
 	unsigned long *discard_bitset;
-	uint32_t discard_block_size;
 
 	/*
 	 * Rather than reconstructing the table line for the status we just
@@ -526,48 +525,33 @@ static dm_block_t block_div(dm_block_t b, uint32_t n)
 	return b;
 }
 
-static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
-{
-	uint32_t discard_blocks = cache->discard_block_size;
-	dm_block_t b = from_oblock(oblock);
-
-	if (!block_size_is_power_of_two(cache))
-		discard_blocks = discard_blocks / cache->sectors_per_block;
-	else
-		discard_blocks >>= cache->sectors_per_block_shift;
-
-	b = block_div(b, discard_blocks);
-
-	return to_dblock(b);
-}
-
-static void set_discard(struct cache *cache, dm_dblock_t b)
+static void set_discard(struct cache *cache, dm_oblock_t b)
 {
 	unsigned long flags;
 
 	atomic_inc(&cache->stats.discard_count);
 
 	spin_lock_irqsave(&cache->lock, flags);
-	set_bit(from_dblock(b), cache->discard_bitset);
+	set_bit(from_oblock(b), cache->discard_bitset);
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
-static void clear_discard(struct cache *cache, dm_dblock_t b)
+static void clear_discard(struct cache *cache, dm_oblock_t b)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&cache->lock, flags);
-	clear_bit(from_dblock(b), cache->discard_bitset);
+	clear_bit(from_oblock(b), cache->discard_bitset);
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
-static bool is_discarded(struct cache *cache, dm_dblock_t b)
+static bool is_discarded(struct cache *cache, dm_oblock_t b)
 {
 	int r;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cache->lock, flags);
-	r = test_bit(from_dblock(b), cache->discard_bitset);
+	r = test_bit(from_oblock(b), cache->discard_bitset);
 	spin_unlock_irqrestore(&cache->lock, flags);
 
 	return r;
@@ -579,8 +563,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cache->lock, flags);
-	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
-		     cache->discard_bitset);
+	r = test_bit(from_oblock(b), cache->discard_bitset);
 	spin_unlock_irqrestore(&cache->lock, flags);
 
 	return r;
@@ -705,7 +688,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
 	check_if_tick_bio_needed(cache, bio);
 	remap_to_origin(cache, bio);
 	if (bio_data_dir(bio) == WRITE)
-		clear_discard(cache, oblock_to_dblock(cache, oblock));
+		clear_discard(cache, oblock);
 }
 
 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
@@ -715,7 +698,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 	remap_to_cache(cache, bio, cblock);
 	if (bio_data_dir(bio) == WRITE) {
 		set_dirty(cache, oblock, cblock);
-		clear_discard(cache, oblock_to_dblock(cache, oblock));
+		clear_discard(cache, oblock);
 	}
 }
 
@@ -1288,14 +1271,14 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
 static void process_discard_bio(struct cache *cache, struct bio *bio)
 {
 	dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
-						  cache->discard_block_size);
+						  cache->sectors_per_block);
 	dm_block_t end_block = bio_end_sector(bio);
 	dm_block_t b;
 
-	end_block = block_div(end_block, cache->discard_block_size);
+	end_block = block_div(end_block, cache->sectors_per_block);
 
 	for (b = start_block; b < end_block; b++)
-		set_discard(cache, to_dblock(b));
+		set_discard(cache, to_oblock(b));
 
 	bio_endio(bio, 0);
 }
@@ -2292,14 +2275,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	}
 	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
 
-	cache->discard_block_size = cache->sectors_per_block;
-	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
-	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
+	cache->discard_nr_blocks = cache->origin_blocks;
+	cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks));
 	if (!cache->discard_bitset) {
 		*error = "could not allocate discard bitset";
 		goto bad;
 	}
-	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
+	clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks));
 
 	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
 	if (IS_ERR(cache->copier)) {
@@ -2583,16 +2565,16 @@ static int write_discard_bitset(struct cache *cache)
 {
 	unsigned i, r;
 
-	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
-					   cache->discard_nr_blocks);
+	r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block,
+					   cache->origin_blocks);
 	if (r) {
 		DMERR("could not resize on-disk discard bitset");
 		return r;
 	}
 
-	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
-		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
-					 is_discarded(cache, to_dblock(i)));
+	for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) {
+		r = dm_cache_set_discard(cache->cmd, to_oblock(i),
+					 is_discarded(cache, to_oblock(i)));
 		if (r)
 			return r;
 	}
@@ -2689,16 +2671,14 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
 }
 
 static int load_discard(void *context, sector_t discard_block_size,
-			dm_dblock_t dblock, bool discard)
+			dm_oblock_t oblock, bool discard)
 {
 	struct cache *cache = context;
 
-	/* FIXME: handle mis-matched block size */
-
 	if (discard)
-		set_discard(cache, dblock);
+		set_discard(cache, oblock);
 	else
-		clear_discard(cache, dblock);
+		clear_discard(cache, oblock);
 
 	return 0;
 }
@@ -3089,8 +3069,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 	/*
 	 * FIXME: these limits may be incompatible with the cache device
 	 */
-	limits->max_discard_sectors = cache->discard_block_size;
-	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+	limits->max_discard_sectors = cache->sectors_per_block;
+	limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT;
 }
 
 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)