author    David Vrabel <david.vrabel@citrix.com>  2014-04-07 08:52:12 -0400
committer David Vrabel <david.vrabel@citrix.com>  2014-04-07 08:52:12 -0400
commit    2c5cb2770392fb9c5d8518688c8bc61986d70dc6 (patch)
tree      b19210e709de6ee0d22b67ef605a569500cf1a18 /drivers/md/dm-cache-target.c
parent    cd979883b9ede90643e019f33cb317933eb867b4 (diff)
parent    683b6c6f82a60fabf47012581c2cfbf1b037ab95 (diff)
Merge commit '683b6c6f82a60fabf47012581c2cfbf1b037ab95' into stable/for-linus-3.15
This merge of the irq-core-for-linus branch broke the ARM build when
Xen is enabled.
Conflicts:
drivers/xen/events/events_base.c
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--  drivers/md/dm-cache-target.c  24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ffd472e015ca..074b9c8e4cf0 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -289,6 +289,7 @@ struct per_bio_data {
 	bool tick:1;
 	unsigned req_nr:2;
 	struct dm_deferred_entry *all_io_entry;
+	struct dm_hook_info hook_info;
 
 	/*
 	 * writethrough fields.  These MUST remain at the end of this
@@ -297,7 +298,6 @@ struct per_bio_data {
 	 */
 	struct cache *cache;
 	dm_cblock_t cblock;
-	struct dm_hook_info hook_info;
 	struct dm_bio_details bio_details;
 };
 
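The two hunks above move hook_info ahead of the writethrough-only tail of
per_bio_data. The constraint is the one the in-code comment states: dm-cache
allocates a smaller per-bio data area for non-writethrough bios (roughly
offsetof(struct per_bio_data, cache) bytes rather than the full struct), so
any field the common path touches must come before the tail. A standalone
sketch of that convention, with illustrative names and members rather than
the driver's own:

	#include <stdio.h>
	#include <stddef.h>

	/* Sketch only: mimics the two-tier per-bio data layout. */
	struct pb_sketch {
		unsigned tick:1;
		void *all_io_entry;
		void *hook_info;	/* used by every path: keep before the tail */

		/* writethrough-only tail -- MUST stay last */
		void *cache;
		unsigned long cblock;
	};

	int main(void)
	{
		/* Non-writethrough bios would only allocate up to 'cache'. */
		printf("common size %zu, writethrough size %zu\n",
		       offsetof(struct pb_sketch, cache), sizeof(struct pb_sketch));
		return 0;
	}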
@@ -671,15 +671,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
 			   dm_cblock_t cblock)
 {
 	sector_t bi_sector = bio->bi_iter.bi_sector;
+	sector_t block = from_cblock(cblock);
 
 	bio->bi_bdev = cache->cache_dev->bdev;
 	if (!block_size_is_power_of_two(cache))
 		bio->bi_iter.bi_sector =
-			(from_cblock(cblock) * cache->sectors_per_block) +
+			(block * cache->sectors_per_block) +
 			sector_div(bi_sector, cache->sectors_per_block);
 	else
 		bio->bi_iter.bi_sector =
-			(from_cblock(cblock) << cache->sectors_per_block_shift) |
+			(block << cache->sectors_per_block_shift) |
 			(bi_sector & (cache->sectors_per_block - 1));
 }
 
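This hunk only hoists from_cblock(cblock) into a local; the two branches
still compute the same remapped sector, the else branch being valid only
because sectors_per_block is a power of two there. (In the kernel,
sector_div() divides in place and returns the remainder, which is what the
% stands in for below.) A runnable check of the equivalence, with arbitrary
example values:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t sectors_per_block = 8;	/* power of two */
		unsigned shift = 3;		/* log2(sectors_per_block) */
		uint64_t block = 5;		/* cache block index */
		uint64_t bi_sector = 42;	/* bio's original sector */

		/* Generic form: multiply plus remainder within the block. */
		uint64_t generic = block * sectors_per_block +
				   bi_sector % sectors_per_block;

		/* Power-of-two form: shift plus mask, as in the else branch. */
		uint64_t pow2 = (block << shift) |
				(bi_sector & (sectors_per_block - 1));

		assert(generic == pow2);
		printf("remapped sector: %llu\n", (unsigned long long)generic);
		return 0;
	}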
@@ -978,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
 	int r;
 	struct dm_io_region o_region, c_region;
 	struct cache *cache = mg->cache;
+	sector_t cblock = from_cblock(mg->cblock);
 
 	o_region.bdev = cache->origin_dev->bdev;
 	o_region.count = cache->sectors_per_block;
 
 	c_region.bdev = cache->cache_dev->bdev;
-	c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+	c_region.sector = cblock * cache->sectors_per_block;
 	c_region.count = cache->sectors_per_block;
 
 	if (mg->writeback || mg->demote) {
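The hoisted cblock local feeds the same arithmetic as before: with
sectors_per_block = 8 (a 4 KiB block of 512-byte sectors) and mg->cblock = 5,
for example, c_region starts at sector 40 and covers sectors 40..47 of the
cache device, exactly one block's worth of I/O.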
@@ -1010,13 +1012,15 @@ static void overwrite_endio(struct bio *bio, int err)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	unsigned long flags;
 
+	dm_unhook_bio(&pb->hook_info, bio);
+
 	if (err)
 		mg->err = true;
 
+	mg->requeue_holder = false;
+
 	spin_lock_irqsave(&cache->lock, flags);
 	list_add_tail(&mg->list, &cache->completed_migrations);
-	dm_unhook_bio(&pb->hook_info, bio);
-	mg->requeue_holder = false;
 	spin_unlock_irqrestore(&cache->lock, flags);
 
 	wake_worker(cache);
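Besides moving the unhook ahead of the error check, the reordering narrows
the spinlock's critical section to the shared-list manipulation alone:
dm_unhook_bio() and the requeue_holder update operate on this bio and its
migration rather than on the lists that cache->lock protects, so they can
run before the lock is taken.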
@@ -2461,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	bool discarded_block;
 	struct dm_bio_prison_cell *cell;
 	struct policy_result lookup_result;
-	struct per_bio_data *pb;
+	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
 
-	if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
 		/*
 		 * This can only occur if the io goes to a partial block at
 		 * the end of the origin device.  We don't cache these.
 		 * Just remap to the origin and carry on.
 		 */
-		remap_to_origin_clear_discard(cache, bio, block);
+		remap_to_origin(cache, bio);
 		return DM_MAPIO_REMAPPED;
 	}
 
-	pb = init_per_bio_data(bio, pb_data_size);
-
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
 		defer_bio(cache, bio);
 		return DM_MAPIO_SUBMITTED;
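The bounds test also gains an off-by-one fix: assuming zero-based block
indices with origin_blocks counting whole blocks, a bio landing in a
trailing partial block computes an index equal to origin_blocks, which the
old strict > let through to the cache path. A standalone illustration under
those assumptions:

	#include <stdio.h>

	int main(void)
	{
		unsigned long origin_blocks = 4;	/* whole blocks: indices 0..3 */
		unsigned long block = 4;		/* trailing partial block */

		printf("old check (>):  %s\n",
		       block > origin_blocks ? "remap to origin" : "cache path");
		printf("new check (>=): %s\n",
		       block >= origin_blocks ? "remap to origin" : "cache path");
		return 0;
	}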