author		Mike Snitzer <snitzer@redhat.com>	2017-10-19 21:44:32 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2017-11-10 15:44:49 -0500
commit		693b960ea891d1b7f89c644cd7eb125554fb2e88
tree		0c52d1b71be2871be7055771e0189c7a9f5e9f36 /drivers/md/dm-cache-target.c
parent		9958f1d9a04efb3db19134482b3f4c6897e0e7b8
dm cache: simplify get_per_bio_data() by removing data_size argument
There is only one per_bio_data size now that writethrough-specific data
was removed from the per_bio_data structure.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
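For context, the idiom this patch settles on is device-mapper's standard per-bio data mechanism: the target declares a fixed scratch size once via ti->per_io_data_size in its constructor, and every later dm_per_bio_data() call must pass that same size. A minimal sketch of the pattern, under stated assumptions (example_ctr and example_map are hypothetical names, not part of this patch; the per_bio_data fields shown are the ones visible in the diff below):

#include <linux/device-mapper.h>

struct per_bio_data {
	bool tick;
	unsigned req_nr;
	/* remaining dm-cache fields elided */
};

/* Hypothetical constructor: reserve per-bio scratch space once. */
static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	ti->per_io_data_size = sizeof(struct per_bio_data);
	return 0;
}

/*
 * Hypothetical map function: the size passed to dm_per_bio_data() must
 * match per_io_data_size above.  With only one per_bio_data layout left,
 * sizeof(struct per_bio_data) is always correct, which is what allows
 * this patch to drop the data_size argument from the helpers.
 */
static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	return DM_MAPIO_REMAPPED;
}

A secondary effect, visible in the check_if_tick_bio_needed() and accounted_begin() hunks below, is that the lookup no longer has to run unconditionally at function entry and can move inside the branch that actually uses the per-bio data.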
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--	drivers/md/dm-cache-target.c	61
1 file changed, 22 insertions(+), 39 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index dcfbe6f91972..dd42d5ab8803 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -603,21 +603,16 @@ static unsigned lock_level(struct bio *bio)
  * Per bio data
  *--------------------------------------------------------------*/
 
-static size_t get_per_bio_data_size(struct cache *cache)
+static struct per_bio_data *get_per_bio_data(struct bio *bio)
 {
-	return sizeof(struct per_bio_data);
-}
-
-static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
-{
-	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
+	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 	BUG_ON(!pb);
 	return pb;
 }
 
-static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
+static struct per_bio_data *init_per_bio_data(struct bio *bio)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio, data_size);
+	struct per_bio_data *pb = get_per_bio_data(bio);
 
 	pb->tick = false;
 	pb->req_nr = dm_bio_get_target_bio_nr(bio);
@@ -657,7 +652,6 @@ static void defer_bios(struct cache *cache, struct bio_list *bios)
 static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
 {
 	bool r;
-	size_t pb_size;
 	struct per_bio_data *pb;
 	struct dm_cell_key_v2 key;
 	dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
@@ -682,8 +676,7 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
 	if (cell != cell_prealloc)
 		free_prison_cell(cache, cell_prealloc);
 
-	pb_size = get_per_bio_data_size(cache);
-	pb = get_per_bio_data(bio, pb_size);
+	pb = get_per_bio_data(bio);
 	pb->cell = cell;
 
 	return r;
@@ -835,12 +828,12 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
-	size_t pb_data_size = get_per_bio_data_size(cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+	struct per_bio_data *pb;
 
 	spin_lock_irqsave(&cache->lock, flags);
 	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
+		pb = get_per_bio_data(bio);
 		pb->tick = true;
 		cache->need_tick_bio = false;
 	}
@@ -894,10 +887,10 @@ static bool accountable_bio(struct cache *cache, struct bio *bio)
 
 static void accounted_begin(struct cache *cache, struct bio *bio)
 {
-	size_t pb_data_size = get_per_bio_data_size(cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+	struct per_bio_data *pb;
 
 	if (accountable_bio(cache, bio)) {
+		pb = get_per_bio_data(bio);
 		pb->len = bio_sectors(bio);
 		iot_io_begin(&cache->tracker, pb->len);
 	}
@@ -905,8 +898,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
 
 static void accounted_complete(struct cache *cache, struct bio *bio)
 {
-	size_t pb_data_size = get_per_bio_data_size(cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+	struct per_bio_data *pb = get_per_bio_data(bio);
 
 	iot_io_end(&cache->tracker, pb->len);
 }
@@ -1215,8 +1207,7 @@ static int copy(struct dm_cache_migration *mg, bool promote)
1215 1207
1216static void bio_drop_shared_lock(struct cache *cache, struct bio *bio) 1208static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
1217{ 1209{
1218 size_t pb_data_size = get_per_bio_data_size(cache); 1210 struct per_bio_data *pb = get_per_bio_data(bio);
1219 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1220 1211
1221 if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell)) 1212 if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
1222 free_prison_cell(cache, pb->cell); 1213 free_prison_cell(cache, pb->cell);
@@ -1227,23 +1218,21 @@ static void overwrite_endio(struct bio *bio)
 {
 	struct dm_cache_migration *mg = bio->bi_private;
 	struct cache *cache = mg->cache;
-	size_t pb_data_size = get_per_bio_data_size(cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+	struct per_bio_data *pb = get_per_bio_data(bio);
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
 	if (bio->bi_status)
 		mg->k.input = bio->bi_status;
 
-	queue_continuation(mg->cache->wq, &mg->k);
+	queue_continuation(cache->wq, &mg->k);
 }
 
 static void overwrite(struct dm_cache_migration *mg,
 		      void (*continuation)(struct work_struct *))
 {
 	struct bio *bio = mg->overwrite_bio;
-	size_t pb_data_size = get_per_bio_data_size(mg->cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+	struct per_bio_data *pb = get_per_bio_data(bio);
 
 	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
 
@@ -1741,8 +1730,6 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
 	int r, data_dir;
 	bool rb, background_queued;
 	dm_cblock_t cblock;
-	size_t pb_data_size = get_per_bio_data_size(cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	*commit_needed = false;
 
@@ -1791,6 +1778,8 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
 	}
 
 	if (r == -ENOENT) {
+		struct per_bio_data *pb = get_per_bio_data(bio);
+
 		/*
 		 * Miss.
 		 */
@@ -1798,7 +1787,6 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
 		if (pb->req_nr == 0) {
 			accounted_begin(cache, bio);
 			remap_to_origin_clear_discard(cache, bio, block);
-
 		} else {
 			/*
 			 * This is a duplicate writethrough io that is no
@@ -1824,7 +1812,6 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
 			invalidate_start(cache, cblock, block, bio);
 		} else
 			remap_to_origin_clear_discard(cache, bio, block);
-
 	} else {
 		if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
 		    !is_dirty(cache, cblock)) {
@@ -1897,8 +1884,7 @@ static blk_status_t commit_op(void *context)
 
 static bool process_flush_bio(struct cache *cache, struct bio *bio)
 {
-	size_t pb_data_size = get_per_bio_data_size(cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+	struct per_bio_data *pb = get_per_bio_data(bio);
 
 	if (!pb->req_nr)
 		remap_to_origin(cache, bio);
@@ -2511,9 +2497,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	ti->discards_supported = true;
 	ti->split_discard_bios = false;
 
-	cache->features = ca->features;
-	ti->per_io_data_size = get_per_bio_data_size(cache);
+	ti->per_io_data_size = sizeof(struct per_bio_data);
 
+	cache->features = ca->features;
 	if (writethrough_mode(cache)) {
 		/* Create bioset for writethrough bios issued to origin */
 		cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
@@ -2755,9 +2741,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	int r;
 	bool commit_needed;
 	dm_oblock_t block = get_bio_block(cache, bio);
-	size_t pb_data_size = get_per_bio_data_size(cache);
 
-	init_per_bio_data(bio, pb_data_size);
+	init_per_bio_data(bio);
 	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
 		/*
 		 * This can only occur if the io goes to a partial block at
@@ -2781,13 +2766,11 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	return r;
 }
 
-static int cache_end_io(struct dm_target *ti, struct bio *bio,
-		blk_status_t *error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
 {
 	struct cache *cache = ti->private;
 	unsigned long flags;
-	size_t pb_data_size = get_per_bio_data_size(cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+	struct per_bio_data *pb = get_per_bio_data(bio);
 
 	if (pb->tick) {
 		policy_tick(cache->policy, false);