path: root/drivers/md/dm-cache-target.c
author    Joe Thornber <ejt@redhat.com>          2014-11-07 09:47:07 -0500
committer Mike Snitzer <snitzer@redhat.com>      2014-11-10 15:25:30 -0500
commit    1bad9bc4ee899a108499e5eac6baafff018b4d0b (patch)
tree      32eb64e78c6d68b65c29b9462bb264c3c69cdf8f /drivers/md/dm-cache-target.c
parent    5f274d886598c9fd26d2499bf3f68306f170e9db (diff)
dm cache: revert "remove remainder of distinct discard block size"
This reverts commit 64ab346a360a4b15c28fb8531918d4a01f4eabd9 because we
actually do want to allow the discard blocksize to be larger than the
cache blocksize.  Further dm-cache discard changes will make this
possible.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
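[Editor's note: to illustrate the distinct discard block size this revert
restores, here is a minimal userspace sketch of the origin-block to
discard-block mapping; oblock_to_dblock() mirrors the function reinstated in
the patch below, but the block-size constants are purely illustrative and the
power-of-two branch is omitted for brevity.]

#include <stdint.h>
#include <stdio.h>

/* Illustrative values: 64KiB cache blocks (128 sectors) and 512KiB
 * discard blocks (1024 sectors), i.e. 8 cache blocks per discard block. */
#define SECTORS_PER_BLOCK   128u
#define DISCARD_BLOCK_SIZE  1024u  /* sectors; a multiple of the cache block size */

/* Userspace stand-in for the patch's oblock_to_dblock(): the discard
 * bitset is indexed in discard blocks, so an origin block number is
 * divided by the number of cache blocks one discard block covers. */
static uint64_t oblock_to_dblock(uint64_t oblock)
{
	uint32_t cache_blocks_per_discard_block =
		DISCARD_BLOCK_SIZE / SECTORS_PER_BLOCK;

	return oblock / cache_blocks_per_discard_block;
}

int main(void)
{
	/* Origin blocks 0..7 share discard block 0, 8..15 share block 1, ... */
	printf("oblock 300 -> dblock %llu\n",
	       (unsigned long long)oblock_to_dblock(300)); /* prints 37 */
	return 0;
}

With one bit per discard block rather than per origin block, a larger discard
blocksize also shrinks the discard bitset, at the cost of coarser granularity.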
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--  drivers/md/dm-cache-target.c | 72
1 file changed, 46 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 890e2fff4074..ced7fd4adddb 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -236,8 +236,9 @@ struct cache {
 	/*
 	 * origin_blocks entries, discarded if set.
 	 */
-	dm_oblock_t discard_nr_blocks;
+	dm_dblock_t discard_nr_blocks;
 	unsigned long *discard_bitset;
+	uint32_t discard_block_size;
 
 	/*
 	 * Rather than reconstructing the table line for the status we just
@@ -524,33 +525,48 @@ static dm_block_t block_div(dm_block_t b, uint32_t n)
 	return b;
 }
 
-static void set_discard(struct cache *cache, dm_oblock_t b)
+static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
+{
+	uint32_t discard_blocks = cache->discard_block_size;
+	dm_block_t b = from_oblock(oblock);
+
+	if (!block_size_is_power_of_two(cache))
+		discard_blocks = discard_blocks / cache->sectors_per_block;
+	else
+		discard_blocks >>= cache->sectors_per_block_shift;
+
+	b = block_div(b, discard_blocks);
+
+	return to_dblock(b);
+}
+
+static void set_discard(struct cache *cache, dm_dblock_t b)
 {
 	unsigned long flags;
 
 	atomic_inc(&cache->stats.discard_count);
 
 	spin_lock_irqsave(&cache->lock, flags);
-	set_bit(from_oblock(b), cache->discard_bitset);
+	set_bit(from_dblock(b), cache->discard_bitset);
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
-static void clear_discard(struct cache *cache, dm_oblock_t b)
+static void clear_discard(struct cache *cache, dm_dblock_t b)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&cache->lock, flags);
-	clear_bit(from_oblock(b), cache->discard_bitset);
+	clear_bit(from_dblock(b), cache->discard_bitset);
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
-static bool is_discarded(struct cache *cache, dm_oblock_t b)
+static bool is_discarded(struct cache *cache, dm_dblock_t b)
 {
 	int r;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cache->lock, flags);
-	r = test_bit(from_oblock(b), cache->discard_bitset);
+	r = test_bit(from_dblock(b), cache->discard_bitset);
 	spin_unlock_irqrestore(&cache->lock, flags);
 
 	return r;
@@ -562,7 +578,8 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cache->lock, flags);
-	r = test_bit(from_oblock(b), cache->discard_bitset);
+	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
+		     cache->discard_bitset);
 	spin_unlock_irqrestore(&cache->lock, flags);
 
 	return r;
@@ -687,7 +704,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
 	check_if_tick_bio_needed(cache, bio);
 	remap_to_origin(cache, bio);
 	if (bio_data_dir(bio) == WRITE)
-		clear_discard(cache, oblock);
+		clear_discard(cache, oblock_to_dblock(cache, oblock));
 }
 
 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
@@ -697,7 +714,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 	remap_to_cache(cache, bio, cblock);
 	if (bio_data_dir(bio) == WRITE) {
 		set_dirty(cache, oblock, cblock);
-		clear_discard(cache, oblock);
+		clear_discard(cache, oblock_to_dblock(cache, oblock));
 	}
 }
 
@@ -1301,14 +1318,14 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
 static void process_discard_bio(struct cache *cache, struct bio *bio)
 {
 	dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
-						  cache->sectors_per_block);
+						  cache->discard_block_size);
 	dm_block_t end_block = bio_end_sector(bio);
 	dm_block_t b;
 
-	end_block = block_div(end_block, cache->sectors_per_block);
+	end_block = block_div(end_block, cache->discard_block_size);
 
 	for (b = start_block; b < end_block; b++)
-		set_discard(cache, to_oblock(b));
+		set_discard(cache, to_dblock(b));
 
 	bio_endio(bio, 0);
 }
@@ -2303,13 +2320,14 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	}
 	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
 
-	cache->discard_nr_blocks = cache->origin_blocks;
-	cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks));
+	cache->discard_block_size = cache->sectors_per_block;
+	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
+	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
 	if (!cache->discard_bitset) {
 		*error = "could not allocate discard bitset";
 		goto bad;
 	}
-	clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks));
+	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
 
 	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
 	if (IS_ERR(cache->copier)) {
@@ -2599,16 +2617,16 @@ static int write_discard_bitset(struct cache *cache)
 {
 	unsigned i, r;
 
-	r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block,
-					   cache->origin_blocks);
+	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
+					   cache->discard_nr_blocks);
 	if (r) {
 		DMERR("could not resize on-disk discard bitset");
 		return r;
 	}
 
-	for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) {
-		r = dm_cache_set_discard(cache->cmd, to_oblock(i),
-					 is_discarded(cache, to_oblock(i)));
+	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
+		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
+					 is_discarded(cache, to_dblock(i)));
 		if (r)
 			return r;
 	}
@@ -2681,14 +2699,16 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
 }
 
 static int load_discard(void *context, sector_t discard_block_size,
-			dm_oblock_t oblock, bool discard)
+			dm_dblock_t dblock, bool discard)
 {
 	struct cache *cache = context;
 
+	/* FIXME: handle mis-matched block size */
+
 	if (discard)
-		set_discard(cache, oblock);
+		set_discard(cache, dblock);
 	else
-		clear_discard(cache, oblock);
+		clear_discard(cache, dblock);
 
 	return 0;
 }
@@ -3079,8 +3099,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 	/*
 	 * FIXME: these limits may be incompatible with the cache device
 	 */
-	limits->max_discard_sectors = cache->sectors_per_block;
-	limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT;
+	limits->max_discard_sectors = cache->discard_block_size;
+	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
 }
 
 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
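[Editor's note: a hedged sketch of the rounding done by process_discard_bio()
above. The start sector is rounded up and the end sector rounded down, so only
discard blocks fully covered by the bio are marked. div_up()/div_down() are
local stand-ins for the kernel's dm_sector_div_up()/block_div(), and the
sector numbers are illustrative.]

#include <stdint.h>
#include <stdio.h>

#define DISCARD_BLOCK_SIZE 1024u  /* sectors, as in the sketch above */

/* Stand-ins for dm_sector_div_up() and block_div(). */
static uint64_t div_up(uint64_t sectors, uint32_t block)
{
	return (sectors + block - 1) / block;
}

static uint64_t div_down(uint64_t sectors, uint32_t block)
{
	return sectors / block;
}

int main(void)
{
	/* A discard bio covering sectors [1500, 5000) ... */
	uint64_t bi_sector = 1500, bi_end = 5000;

	uint64_t start = div_up(bi_sector, DISCARD_BLOCK_SIZE);  /* 2 */
	uint64_t end   = div_down(bi_end, DISCARD_BLOCK_SIZE);   /* 4 */

	/* ... marks only discard blocks 2 and 3; the partially covered
	 * blocks 1 and 4 are left alone, which is safe because a discard
	 * is a hint and under-marking never loses data. */
	for (uint64_t b = start; b < end; b++)
		printf("set_discard(dblock %llu)\n", (unsigned long long)b);
	return 0;
}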