author    Mike Snitzer <snitzer@redhat.com>    2013-04-05 10:36:34 -0400
committer Alasdair G Kergon <agk@redhat.com>   2013-04-05 10:36:34 -0400
commit    19b0092e265fe9ab129902373c3127c0e0be3376 (patch)
tree      1aae9fbcc44e25af28816cb317c684cbd259e503
parent    b844fe691897221ad0d5e0279c8ea9e3e4a46982 (diff)
dm cache: reduce bio front_pad size in writeback mode
A recent patch to fix the dm cache target's writethrough mode extended
the bio's front_pad to include a 1056-byte struct dm_bio_details.
Writeback mode doesn't need this, so this patch reduces the
per_bio_data_size to 16 bytes in this case instead of 1096.

The dm_bio_details structure was added in "dm cache: fix writes to
cache device in writethrough mode" which fixed commit e2e74d617e ("dm
cache: fix race in writethrough implementation").  In writeback mode
we avoid allocating the writethrough-specific members of the
per_bio_data structure (the dm_bio_details structure included).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
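For reference, here is a minimal standalone sketch of the sizing trick
the patch relies on. The struct, its field names and the
example_pb_size() helper are illustrative stand-ins, not the real
dm-cache definitions: the point is that offsetof() of the first
writethrough-only member gives the size of everything before it, so
writeback mode can request a smaller per-bio allocation, just as
PB_DATA_SIZE_WB does below.

/*
 * Illustrative sketch only -- names are hypothetical, not the real
 * dm-cache types.  Mode-independent fields come first; the
 * writethrough-only fields sit at the end, with 'cache' first so
 * offsetof() marks the boundary between the two groups.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct example_per_bio_data {
	/* fields needed in every cache mode */
	bool tick;
	unsigned req_nr;

	/* writethrough-only fields; must stay at the end */
	void *cache;
	unsigned cblock;
	void (*saved_bi_end_io)(void);
};

/* writeback: everything before 'cache'; writethrough: the whole struct */
#define EX_PB_SIZE_WB (offsetof(struct example_per_bio_data, cache))
#define EX_PB_SIZE_WT (sizeof(struct example_per_bio_data))

static size_t example_pb_size(bool write_through)
{
	return write_through ? EX_PB_SIZE_WT : EX_PB_SIZE_WB;
}

int main(void)
{
	printf("writeback: %zu bytes, writethrough: %zu bytes\n",
	       example_pb_size(false), example_pb_size(true));
	return 0;
}

Printing the two sizes shows writeback paying only for the leading
mode-independent fields, which is how the real patch gets writeback's
per_bio_data_size down to 16 bytes while writethrough keeps the full
1096.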
-rw-r--r--  drivers/md/dm-cache-target.c  47
1 file changed, 34 insertions, 13 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1ab122a75764..10744091e6ca 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -202,7 +202,11 @@ struct per_bio_data {
 	unsigned req_nr:2;
 	struct dm_deferred_entry *all_io_entry;
 
-	/* writethrough fields */
+	/*
+	 * writethrough fields.  These MUST remain at the end of this
+	 * structure and the 'cache' member must be the first as it
+	 * is used to determine the offsetof the writethrough fields.
+	 */
 	struct cache *cache;
 	dm_cblock_t cblock;
 	bio_end_io_t *saved_bi_end_io;
@@ -515,16 +519,28 @@ static void save_stats(struct cache *cache)
 /*----------------------------------------------------------------
  * Per bio data
  *--------------------------------------------------------------*/
-static struct per_bio_data *get_per_bio_data(struct bio *bio)
+
+/*
+ * If using writeback, leave out struct per_bio_data's writethrough fields.
+ */
+#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
+#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
+
+static size_t get_per_bio_data_size(struct cache *cache)
+{
+	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+}
+
+static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
 {
-	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
 	BUG_ON(!pb);
 	return pb;
 }
 
-static struct per_bio_data *init_per_bio_data(struct bio *bio)
+static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	struct per_bio_data *pb = get_per_bio_data(bio, data_size);
 
 	pb->tick = false;
 	pb->req_nr = dm_bio_get_target_bio_nr(bio);
@@ -558,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	spin_lock_irqsave(&cache->lock, flags);
 	if (cache->need_tick_bio &&
@@ -637,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
 
 static void writethrough_endio(struct bio *bio, int err)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 	bio->bi_end_io = pb->saved_bi_end_io;
 
 	if (err) {
@@ -665,7 +682,7 @@ static void writethrough_endio(struct bio *bio, int err)
 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
 				       dm_oblock_t oblock, dm_cblock_t cblock)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 
 	pb->cache = cache;
 	pb->cblock = cblock;
@@ -1039,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio)
 
 static void process_flush_bio(struct cache *cache, struct bio *bio)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	BUG_ON(bio->bi_size);
 	if (!pb->req_nr)
@@ -1111,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 	dm_oblock_t block = get_bio_block(cache, bio);
 	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
 	struct policy_result lookup_result;
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	bool discarded_block = is_discarded_oblock(cache, block);
 	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
 
@@ -1885,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	cache->ti = ca->ti;
 	ti->private = cache;
-	ti->per_bio_data_size = sizeof(struct per_bio_data);
 	ti->num_flush_bios = 2;
 	ti->flush_supported = true;
 
@@ -1894,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	ti->discard_zeroes_data_unsupported = true;
 
 	memcpy(&cache->features, &ca->features, sizeof(cache->features));
+	ti->per_bio_data_size = get_per_bio_data_size(cache);
 
 	cache->callbacks.congested_fn = cache_is_congested;
 	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -2096,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 
 	int r;
 	dm_oblock_t block = get_bio_block(cache, bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
 	bool can_migrate = false;
 	bool discarded_block;
 	struct dm_bio_prison_cell *cell;
@@ -2112,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}
 
-	pb = init_per_bio_data(bio);
+	pb = init_per_bio_data(bio, pb_data_size);
 
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
 		defer_bio(cache, bio);
@@ -2197,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
 	struct cache *cache = ti->private;
 	unsigned long flags;
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	if (pb->tick) {
 		policy_tick(cache->policy);