diff options
author:    Joe Thornber <ejt@redhat.com>        2013-10-31 13:55:49 -0400
committer: Mike Snitzer <snitzer@redhat.com>    2013-11-11 11:37:45 -0500
commit:    f494a9c6b1b6dd9a9f21bbb75d9210d478eeb498 (patch)
tree:      e57197b7d38e7c6b2d718681fd70540ccc9a831e /drivers/md
parent:    c9d28d5d09a0fd5f02f1321c8e18ff7d9f92270b (diff)
dm cache: cache shrinking support
Allow a cache to shrink if the blocks being removed from the cache are
not dirty.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md')
 drivers/md/dm-cache-metadata.c | 66 ++++++++++++++++++++++++++++++++++++++---
 drivers/md/dm-cache-target.c   | 63 +++++++++++++++++++++++++++++++--------
 2 files changed, 120 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 2262b4e57a28..062b83ed3e84 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c | |||
@@ -667,19 +667,85 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd) | |||
667 | kfree(cmd); | 667 | kfree(cmd); |
668 | } | 668 | } |
669 | 669 | ||
670 | /* | ||
671 | * Checks that the given cache block is either unmapped or clean. | ||
672 | */ | ||
673 | static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b, | ||
674 | bool *result) | ||
675 | { | ||
676 | int r; | ||
677 | __le64 value; | ||
678 | dm_oblock_t ob; | ||
679 | unsigned flags; | ||
680 | |||
681 | r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value); | ||
682 | if (r) { | ||
683 | DMERR("block_unmapped_or_clean failed"); | ||
684 | return r; | ||
685 | } | ||
686 | |||
687 | unpack_value(value, &ob, &flags); | ||
688 | *result = !((flags & M_VALID) && (flags & M_DIRTY)); | ||
689 | |||
690 | return 0; | ||
691 | } | ||
692 | |||
693 | static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd, | ||
694 | dm_cblock_t begin, dm_cblock_t end, | ||
695 | bool *result) | ||
696 | { | ||
697 | int r; | ||
698 | *result = true; | ||
699 | |||
700 | while (begin != end) { | ||
701 | r = block_unmapped_or_clean(cmd, begin, result); | ||
702 | if (r) | ||
703 | return r; | ||
704 | |||
705 | if (!*result) { | ||
706 | DMERR("cache block %llu is dirty", | ||
707 | (unsigned long long) from_cblock(begin)); | ||
708 | return 0; | ||
709 | } | ||
710 | |||
711 | begin = to_cblock(from_cblock(begin) + 1); | ||
712 | } | ||
713 | |||
714 | return 0; | ||
715 | } | ||
716 | |||
670 | int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size) | 717 | int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size) |
671 | { | 718 | { |
672 | int r; | 719 | int r; |
720 | bool clean; | ||
673 | __le64 null_mapping = pack_value(0, 0); | 721 | __le64 null_mapping = pack_value(0, 0); |
674 | 722 | ||
675 | down_write(&cmd->root_lock); | 723 | down_write(&cmd->root_lock); |
676 | __dm_bless_for_disk(&null_mapping); | 724 | __dm_bless_for_disk(&null_mapping); |
725 | |||
726 | if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) { | ||
727 | r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean); | ||
728 | if (r) { | ||
729 | __dm_unbless_for_disk(&null_mapping); | ||
730 | goto out; | ||
731 | } | ||
732 | |||
733 | if (!clean) { | ||
734 | DMERR("unable to shrink cache due to dirty blocks"); | ||
735 | r = -EINVAL; | ||
736 | __dm_unbless_for_disk(&null_mapping); | ||
737 | goto out; | ||
738 | } | ||
739 | } | ||
740 | |||
677 | r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks), | 741 | r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks), |
678 | from_cblock(new_cache_size), | 742 | from_cblock(new_cache_size), |
679 | &null_mapping, &cmd->root); | 743 | &null_mapping, &cmd->root); |
680 | if (!r) | 744 | if (!r) |
681 | cmd->cache_blocks = new_cache_size; | 745 | cmd->cache_blocks = new_cache_size; |
682 | cmd->changed = true; | 746 | cmd->changed = true; |
747 | |||
748 | out: | ||
683 | up_write(&cmd->root_lock); | 749 | up_write(&cmd->root_lock); |
684 | 750 | ||
685 | return r; | 751 | return r; |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 655994fdf308..183dfc9db297 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -2502,26 +2502,71 @@ static int load_discard(void *context, sector_t discard_block_size, | |||
2502 | return 0; | 2502 | return 0; |
2503 | } | 2503 | } |
2504 | 2504 | ||
2505 | static dm_cblock_t get_cache_dev_size(struct cache *cache) | ||
2506 | { | ||
2507 | sector_t size = get_dev_size(cache->cache_dev); | ||
2508 | (void) sector_div(size, cache->sectors_per_block); | ||
2509 | return to_cblock(size); | ||
2510 | } | ||
2511 | |||
2512 | static bool can_resize(struct cache *cache, dm_cblock_t new_size) | ||
2513 | { | ||
2514 | if (from_cblock(new_size) > from_cblock(cache->cache_size)) | ||
2515 | return true; | ||
2516 | |||
2517 | /* | ||
2518 | * We can't drop a dirty block when shrinking the cache. | ||
2519 | */ | ||
2520 | while (from_cblock(new_size) < from_cblock(cache->cache_size)) { | ||
2521 | new_size = to_cblock(from_cblock(new_size) + 1); | ||
2522 | if (is_dirty(cache, new_size)) { | ||
2523 | DMERR("unable to shrink cache; cache block %llu is dirty", | ||
2524 | (unsigned long long) from_cblock(new_size)); | ||
2525 | return false; | ||
2526 | } | ||
2527 | } | ||
2528 | |||
2529 | return true; | ||
2530 | } | ||
2531 | |||
2532 | static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) | ||
2533 | { | ||
2534 | int r; | ||
2535 | |||
2536 | r = dm_cache_resize(cache->cmd, cache->cache_size); | ||
2537 | if (r) { | ||
2538 | DMERR("could not resize cache metadata"); | ||
2539 | return r; | ||
2540 | } | ||
2541 | |||
2542 | cache->cache_size = new_size; | ||
2543 | |||
2544 | return 0; | ||
2545 | } | ||
2546 | |||
2505 | static int cache_preresume(struct dm_target *ti) | 2547 | static int cache_preresume(struct dm_target *ti) |
2506 | { | 2548 | { |
2507 | int r = 0; | 2549 | int r = 0; |
2508 | struct cache *cache = ti->private; | 2550 | struct cache *cache = ti->private; |
2509 | sector_t actual_cache_size = get_dev_size(cache->cache_dev); | 2551 | dm_cblock_t csize = get_cache_dev_size(cache); |
2510 | (void) sector_div(actual_cache_size, cache->sectors_per_block); | ||
2511 | 2552 | ||
2512 | /* | 2553 | /* |
2513 | * Check to see if the cache has resized. | 2554 | * Check to see if the cache has resized. |
2514 | */ | 2555 | */ |
2515 | if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) { | 2556 | if (!cache->sized) { |
2516 | cache->cache_size = to_cblock(actual_cache_size); | 2557 | r = resize_cache_dev(cache, csize); |
2517 | 2558 | if (r) | |
2518 | r = dm_cache_resize(cache->cmd, cache->cache_size); | ||
2519 | if (r) { | ||
2520 | DMERR("could not resize cache metadata"); | ||
2521 | return r; | 2559 | return r; |
2522 | } | ||
2523 | 2560 | ||
2524 | cache->sized = true; | 2561 | cache->sized = true; |
2562 | |||
2563 | } else if (csize != cache->cache_size) { | ||
2564 | if (!can_resize(cache, csize)) | ||
2565 | return -EINVAL; | ||
2566 | |||
2567 | r = resize_cache_dev(cache, csize); | ||
2568 | if (r) | ||
2569 | return r; | ||
2525 | } | 2570 | } |
2526 | 2571 | ||
2527 | if (!cache->loaded_mappings) { | 2572 | if (!cache->loaded_mappings) { |