author     Linus Torvalds <torvalds@linux-foundation.org>  2013-04-05 22:30:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-05 22:30:14 -0400
commit     fe6969094214350e586d56fbfa3ef97cdd74b270
tree       ccfa126b8cd39f090a377f472f549f84f0074380
parent     b196553a7fdf305273268113ba80ef303bf012af
parent     19b0092e265fe9ab129902373c3127c0e0be3376
Merge tag 'dm-3.9-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm
Pull device-mapper fixes from Alasdair Kergon:
"A pair of patches to fix the writethrough mode of the device-mapper
cache target when the device being cached is not itself wrapped with
device-mapper."
* tag 'dm-3.9-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm:
dm cache: reduce bio front_pad size in writeback mode
dm cache: fix writes to cache device in writethrough mode
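
The writethrough failure mode is worth spelling out: in writethrough mode the cache target sends each write to the origin device first and only remaps it to the cache device from the bio's completion handler. When the origin is a raw block device rather than another device-mapper target, the layers below may alter bio fields such as bi_sector while the I/O is in flight, so remapping the completed bio as-is would write the cache copy to the wrong location. The second patch therefore records the bio before submission and restores it on completion; the first patch is the complementary saving that writeback mode, which never uses the extra writethrough per-bio fields, no longer reserves front_pad space for them. A condensed sketch of the record/restore round trip (simplified from the diff below; submission and error handling elided):

	/* before the write is submitted to the origin device */
	dm_bio_record(&pb->bio_details, bio);	/* snapshot bi_sector, bi_bdev, ... */
	bio->bi_end_io = writethrough_endio;

	/* in the completion handler, after the origin write finishes */
	dm_bio_restore(&pb->bio_details, bio);	/* undo any in-flight changes */
	remap_to_cache(pb->cache, bio, pb->cblock);	/* resubmit to the cache device */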
 drivers/md/dm-cache-target.c | 51 ++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 38 insertions(+), 13 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 66120bd46d15..10744091e6ca 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -6,6 +6,7 @@
 
 #include "dm.h"
 #include "dm-bio-prison.h"
+#include "dm-bio-record.h"
 #include "dm-cache-metadata.h"
 
 #include <linux/dm-io.h>
@@ -201,10 +202,15 @@ struct per_bio_data {
 	unsigned req_nr:2;
 	struct dm_deferred_entry *all_io_entry;
 
-	/* writethrough fields */
+	/*
+	 * writethrough fields.  These MUST remain at the end of this
+	 * structure and the 'cache' member must be the first as it
+	 * is used to determine the offsetof the writethrough fields.
+	 */
 	struct cache *cache;
 	dm_cblock_t cblock;
 	bio_end_io_t *saved_bi_end_io;
+	struct dm_bio_details bio_details;
 };
 
 struct dm_cache_migration {
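
The layout rule in the new comment is what makes a mode-dependent allocation possible: with 'cache' placed first among the writethrough fields, offsetof(struct per_bio_data, cache) is exactly the size of the structure minus its writethrough tail. A self-contained illustration of the idiom, using hypothetical field names rather than the driver's:

	#include <stddef.h>
	#include <stdio.h>

	struct pb {
		unsigned tick:1;	/* common fields, used in every mode */
		void *all_io_entry;
		void *cache;		/* first writethrough field marks the cut */
		long cblock;		/* rest of the writethrough-only tail */
	};

	int main(void)
	{
		/* writeback mode can allocate just the common prefix */
		printf("writeback size:    %zu\n", offsetof(struct pb, cache));
		/* writethrough mode needs the whole structure */
		printf("writethrough size: %zu\n", sizeof(struct pb));
		return 0;
	}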
@@ -513,16 +519,28 @@ static void save_stats(struct cache *cache)
 /*----------------------------------------------------------------
  * Per bio data
  *--------------------------------------------------------------*/
-static struct per_bio_data *get_per_bio_data(struct bio *bio)
+
+/*
+ * If using writeback, leave out struct per_bio_data's writethrough fields.
+ */
+#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
+#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
+
+static size_t get_per_bio_data_size(struct cache *cache)
+{
+	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+}
+
+static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
 {
-	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
 	BUG_ON(!pb);
 	return pb;
 }
 
-static struct per_bio_data *init_per_bio_data(struct bio *bio)
+static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	struct per_bio_data *pb = get_per_bio_data(bio, data_size);
 
 	pb->tick = false;
 	pb->req_nr = dm_bio_get_target_bio_nr(bio);
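
Two calling conventions follow from this, and both appear in the hunks below: paths that can run in either mode (cache_map, cache_end_io, the process_* functions) must ask the cache for the size first, while writethrough_endio and remap_to_origin_then_cache pass PB_DATA_SIZE_WT directly because they only execute in writethrough mode, where the full structure is guaranteed to be present. The two alternative call-site shapes:

	/* mode-independent path: size depends on the cache's features */
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	/* writethrough-only path: the full size is known statically */
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);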
@@ -556,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	spin_lock_irqsave(&cache->lock, flags);
 	if (cache->need_tick_bio &&
@@ -635,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
 
 static void writethrough_endio(struct bio *bio, int err)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 	bio->bi_end_io = pb->saved_bi_end_io;
 
 	if (err) {
@@ -643,6 +662,7 @@ static void writethrough_endio(struct bio *bio, int err)
 		return;
 	}
 
+	dm_bio_restore(&pb->bio_details, bio);
 	remap_to_cache(pb->cache, bio, pb->cblock);
 
 	/*
@@ -662,11 +682,12 @@ static void writethrough_endio(struct bio *bio, int err)
 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
 				       dm_oblock_t oblock, dm_cblock_t cblock)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 
 	pb->cache = cache;
 	pb->cblock = cblock;
 	pb->saved_bi_end_io = bio->bi_end_io;
+	dm_bio_record(&pb->bio_details, bio);
 	bio->bi_end_io = writethrough_endio;
 
 	remap_to_origin_clear_discard(pb->cache, bio, oblock);
@@ -1035,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio)
 
 static void process_flush_bio(struct cache *cache, struct bio *bio)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	BUG_ON(bio->bi_size);
 	if (!pb->req_nr)
@@ -1107,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 	dm_oblock_t block = get_bio_block(cache, bio);
 	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
 	struct policy_result lookup_result;
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	bool discarded_block = is_discarded_oblock(cache, block);
 	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
 
@@ -1881,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	cache->ti = ca->ti;
 	ti->private = cache;
-	ti->per_bio_data_size = sizeof(struct per_bio_data);
 	ti->num_flush_bios = 2;
 	ti->flush_supported = true;
 
@@ -1890,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	ti->discard_zeroes_data_unsupported = true;
 
 	memcpy(&cache->features, &ca->features, sizeof(cache->features));
+	ti->per_bio_data_size = get_per_bio_data_size(cache);
 
 	cache->callbacks.congested_fn = cache_is_congested;
 	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
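
Note the ordering here: the deleted assignment in the earlier cache_create() hunk sat before the features were copied in, but get_per_bio_data_size() reads cache->features.write_through, so the assignment has to move below the memcpy():

	memcpy(&cache->features, &ca->features, sizeof(cache->features));
	/* features.write_through is only valid from this point on */
	ti->per_bio_data_size = get_per_bio_data_size(cache);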
@@ -2092,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 
 	int r;
 	dm_oblock_t block = get_bio_block(cache, bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
 	bool can_migrate = false;
 	bool discarded_block;
 	struct dm_bio_prison_cell *cell;
@@ -2108,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}
 
-	pb = init_per_bio_data(bio);
+	pb = init_per_bio_data(bio, pb_data_size);
 
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
 		defer_bio(cache, bio);
@@ -2193,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
 	struct cache *cache = ti->private;
 	unsigned long flags;
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	if (pb->tick) {
 		policy_tick(cache->policy);