diff options
author | Joe Thornber <ejt@redhat.com> | 2013-10-31 13:55:48 -0400 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2013-11-09 18:20:26 -0500 |
commit | c9d28d5d09a0fd5f02f1321c8e18ff7d9f92270b (patch) | |
tree | 480183ffb8ce716748a319df07820f23477e3be7 /drivers/md/dm-cache-target.c | |
parent | c86c30706caa02ffe303e6b87d53ef6a077d4cca (diff) |
dm cache: promotion optimisation for writes
If a write block triggers promotion and covers a whole block we can
avoid a copy.
Introduce dm_{hook,unhook}_bio to simplify saving and restoring bio
fields (bi_private is now used by overwrite). Switch writethrough
support over to using these helpers too.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r-- | drivers/md/dm-cache-target.c | 93 |
1 file changed, 87 insertions(+), 6 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 4b564069e08f..655994fdf308 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -61,6 +61,34 @@ static void free_bitset(unsigned long *bits) | |||
61 | 61 | ||
62 | /*----------------------------------------------------------------*/ | 62 | /*----------------------------------------------------------------*/ |
63 | 63 | ||
64 | /* | ||
65 | * There are a couple of places where we let a bio run, but want to do some | ||
66 | * work before calling its endio function. We do this by temporarily | ||
67 | * changing the endio fn. | ||
68 | */ | ||
69 | struct dm_hook_info { | ||
70 | bio_end_io_t *bi_end_io; | ||
71 | void *bi_private; | ||
72 | }; | ||
73 | |||
74 | static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, | ||
75 | bio_end_io_t *bi_end_io, void *bi_private) | ||
76 | { | ||
77 | h->bi_end_io = bio->bi_end_io; | ||
78 | h->bi_private = bio->bi_private; | ||
79 | |||
80 | bio->bi_end_io = bi_end_io; | ||
81 | bio->bi_private = bi_private; | ||
82 | } | ||
83 | |||
84 | static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) | ||
85 | { | ||
86 | bio->bi_end_io = h->bi_end_io; | ||
87 | bio->bi_private = h->bi_private; | ||
88 | } | ||
89 | |||
90 | /*----------------------------------------------------------------*/ | ||
91 | |||
64 | #define PRISON_CELLS 1024 | 92 | #define PRISON_CELLS 1024 |
65 | #define MIGRATION_POOL_SIZE 128 | 93 | #define MIGRATION_POOL_SIZE 128 |
66 | #define COMMIT_PERIOD HZ | 94 | #define COMMIT_PERIOD HZ |
@@ -214,7 +242,7 @@ struct per_bio_data { | |||
214 | */ | 242 | */ |
215 | struct cache *cache; | 243 | struct cache *cache; |
216 | dm_cblock_t cblock; | 244 | dm_cblock_t cblock; |
217 | bio_end_io_t *saved_bi_end_io; | 245 | struct dm_hook_info hook_info; |
218 | struct dm_bio_details bio_details; | 246 | struct dm_bio_details bio_details; |
219 | }; | 247 | }; |
220 | 248 | ||
@@ -231,6 +259,7 @@ struct dm_cache_migration { | |||
231 | bool writeback:1; | 259 | bool writeback:1; |
232 | bool demote:1; | 260 | bool demote:1; |
233 | bool promote:1; | 261 | bool promote:1; |
262 | bool requeue_holder:1; | ||
234 | 263 | ||
235 | struct dm_bio_prison_cell *old_ocell; | 264 | struct dm_bio_prison_cell *old_ocell; |
236 | struct dm_bio_prison_cell *new_ocell; | 265 | struct dm_bio_prison_cell *new_ocell; |
@@ -666,7 +695,8 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio) | |||
666 | static void writethrough_endio(struct bio *bio, int err) | 695 | static void writethrough_endio(struct bio *bio, int err) |
667 | { | 696 | { |
668 | struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); | 697 | struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); |
669 | bio->bi_end_io = pb->saved_bi_end_io; | 698 | |
699 | dm_unhook_bio(&pb->hook_info, bio); | ||
670 | 700 | ||
671 | if (err) { | 701 | if (err) { |
672 | bio_endio(bio, err); | 702 | bio_endio(bio, err); |
@@ -697,9 +727,8 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, | |||
697 | 727 | ||
698 | pb->cache = cache; | 728 | pb->cache = cache; |
699 | pb->cblock = cblock; | 729 | pb->cblock = cblock; |
700 | pb->saved_bi_end_io = bio->bi_end_io; | 730 | dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL); |
701 | dm_bio_record(&pb->bio_details, bio); | 731 | dm_bio_record(&pb->bio_details, bio); |
702 | bio->bi_end_io = writethrough_endio; | ||
703 | 732 | ||
704 | remap_to_origin_clear_discard(pb->cache, bio, oblock); | 733 | remap_to_origin_clear_discard(pb->cache, bio, oblock); |
705 | } | 734 | } |
@@ -841,7 +870,12 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) | |||
841 | cleanup_migration(mg); | 870 | cleanup_migration(mg); |
842 | 871 | ||
843 | } else { | 872 | } else { |
844 | cell_defer(cache, mg->new_ocell, true); | 873 | if (mg->requeue_holder) |
874 | cell_defer(cache, mg->new_ocell, true); | ||
875 | else { | ||
876 | bio_endio(mg->new_ocell->holder, 0); | ||
877 | cell_defer(cache, mg->new_ocell, false); | ||
878 | } | ||
845 | clear_dirty(cache, mg->new_oblock, mg->cblock); | 879 | clear_dirty(cache, mg->new_oblock, mg->cblock); |
846 | cleanup_migration(mg); | 880 | cleanup_migration(mg); |
847 | } | 881 | } |
@@ -892,6 +926,42 @@ static void issue_copy_real(struct dm_cache_migration *mg) | |||
892 | } | 926 | } |
893 | } | 927 | } |
894 | 928 | ||
929 | static void overwrite_endio(struct bio *bio, int err) | ||
930 | { | ||
931 | struct dm_cache_migration *mg = bio->bi_private; | ||
932 | struct cache *cache = mg->cache; | ||
933 | size_t pb_data_size = get_per_bio_data_size(cache); | ||
934 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
935 | unsigned long flags; | ||
936 | |||
937 | if (err) | ||
938 | mg->err = true; | ||
939 | |||
940 | spin_lock_irqsave(&cache->lock, flags); | ||
941 | list_add_tail(&mg->list, &cache->completed_migrations); | ||
942 | dm_unhook_bio(&pb->hook_info, bio); | ||
943 | mg->requeue_holder = false; | ||
944 | spin_unlock_irqrestore(&cache->lock, flags); | ||
945 | |||
946 | wake_worker(cache); | ||
947 | } | ||
948 | |||
949 | static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) | ||
950 | { | ||
951 | size_t pb_data_size = get_per_bio_data_size(mg->cache); | ||
952 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
953 | |||
954 | dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); | ||
955 | remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); | ||
956 | generic_make_request(bio); | ||
957 | } | ||
958 | |||
959 | static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) | ||
960 | { | ||
961 | return (bio_data_dir(bio) == WRITE) && | ||
962 | (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); | ||
963 | } | ||
964 | |||
895 | static void avoid_copy(struct dm_cache_migration *mg) | 965 | static void avoid_copy(struct dm_cache_migration *mg) |
896 | { | 966 | { |
897 | atomic_inc(&mg->cache->stats.copies_avoided); | 967 | atomic_inc(&mg->cache->stats.copies_avoided); |
@@ -906,9 +976,17 @@ static void issue_copy(struct dm_cache_migration *mg) | |||
906 | if (mg->writeback || mg->demote) | 976 | if (mg->writeback || mg->demote) |
907 | avoid = !is_dirty(cache, mg->cblock) || | 977 | avoid = !is_dirty(cache, mg->cblock) || |
908 | is_discarded_oblock(cache, mg->old_oblock); | 978 | is_discarded_oblock(cache, mg->old_oblock); |
909 | else | 979 | else { |
980 | struct bio *bio = mg->new_ocell->holder; | ||
981 | |||
910 | avoid = is_discarded_oblock(cache, mg->new_oblock); | 982 | avoid = is_discarded_oblock(cache, mg->new_oblock); |
911 | 983 | ||
984 | if (!avoid && bio_writes_complete_block(cache, bio)) { | ||
985 | issue_overwrite(mg, bio); | ||
986 | return; | ||
987 | } | ||
988 | } | ||
989 | |||
912 | avoid ? avoid_copy(mg) : issue_copy_real(mg); | 990 | avoid ? avoid_copy(mg) : issue_copy_real(mg); |
913 | } | 991 | } |
914 | 992 | ||
@@ -998,6 +1076,7 @@ static void promote(struct cache *cache, struct prealloc *structs, | |||
998 | mg->writeback = false; | 1076 | mg->writeback = false; |
999 | mg->demote = false; | 1077 | mg->demote = false; |
1000 | mg->promote = true; | 1078 | mg->promote = true; |
1079 | mg->requeue_holder = true; | ||
1001 | mg->cache = cache; | 1080 | mg->cache = cache; |
1002 | mg->new_oblock = oblock; | 1081 | mg->new_oblock = oblock; |
1003 | mg->cblock = cblock; | 1082 | mg->cblock = cblock; |
@@ -1019,6 +1098,7 @@ static void writeback(struct cache *cache, struct prealloc *structs, | |||
1019 | mg->writeback = true; | 1098 | mg->writeback = true; |
1020 | mg->demote = false; | 1099 | mg->demote = false; |
1021 | mg->promote = false; | 1100 | mg->promote = false; |
1101 | mg->requeue_holder = true; | ||
1022 | mg->cache = cache; | 1102 | mg->cache = cache; |
1023 | mg->old_oblock = oblock; | 1103 | mg->old_oblock = oblock; |
1024 | mg->cblock = cblock; | 1104 | mg->cblock = cblock; |
@@ -1042,6 +1122,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs, | |||
1042 | mg->writeback = false; | 1122 | mg->writeback = false; |
1043 | mg->demote = true; | 1123 | mg->demote = true; |
1044 | mg->promote = true; | 1124 | mg->promote = true; |
1125 | mg->requeue_holder = true; | ||
1045 | mg->cache = cache; | 1126 | mg->cache = cache; |
1046 | mg->old_oblock = old_oblock; | 1127 | mg->old_oblock = old_oblock; |
1047 | mg->new_oblock = new_oblock; | 1128 | mg->new_oblock = new_oblock; |