Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--  drivers/md/dm-thin.c | 85
1 file changed, 71 insertions(+), 14 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2378ee88b1e8..2d9e75586d60 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -549,6 +549,7 @@ struct pool_c {
  */
 struct thin_c {
 	struct dm_dev *pool_dev;
+	struct dm_dev *origin_dev;
 	dm_thin_id dev_id;
 
 	struct pool *pool;
@@ -666,14 +667,16 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 		(bio->bi_sector & pool->offset_mask);
 }
 
-static void remap_and_issue(struct thin_c *tc, struct bio *bio,
-			    dm_block_t block)
+static void remap_to_origin(struct thin_c *tc, struct bio *bio)
+{
+	bio->bi_bdev = tc->origin_dev->bdev;
+}
+
+static void issue(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
 	unsigned long flags;
 
-	remap(tc, bio, block);
-
 	/*
 	 * Batch together any FUA/FLUSH bios we find and then issue
 	 * a single commit for them in process_deferred_bios().
@@ -686,6 +689,19 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 	generic_make_request(bio);
 }
 
+static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
+{
+	remap_to_origin(tc, bio);
+	issue(tc, bio);
+}
+
+static void remap_and_issue(struct thin_c *tc, struct bio *bio,
+			    dm_block_t block)
+{
+	remap(tc, bio, block);
+	issue(tc, bio);
+}
+
 /*
  * wake_worker() is used when new work is queued and when pool_resume is
  * ready to continue deferred IO processing.
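The two hunks above split the old remap_and_issue() so that the FUA/FLUSH batching (now in issue()) is shared by both remap flavours. Note that remap_to_origin() retargets only bi_bdev and leaves bi_sector untouched, so the external origin is read at exactly the same logical offset as the thin device. A minimal sketch of that invariant; check_origin_remap() is hypothetical and not part of the patch:

/*
 * Hypothetical helper (illustration only, not in the patch): after
 * remap_to_origin() the bio keeps its sector, so the thin-device
 * offset and the origin offset are identical.
 */
static bool check_origin_remap(struct thin_c *tc, struct bio *bio)
{
	sector_t sector = bio->bi_sector;

	remap_to_origin(tc, bio);
	return bio->bi_bdev == tc->origin_dev->bdev &&
	       bio->bi_sector == sector;
}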
@@ -932,7 +948,8 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
 }
 
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
-			  dm_block_t data_origin, dm_block_t data_dest,
+			  struct dm_dev *origin, dm_block_t data_origin,
+			  dm_block_t data_dest,
 			  struct cell *cell, struct bio *bio)
 {
 	int r;
@@ -964,7 +981,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	} else {
 		struct dm_io_region from, to;
 
-		from.bdev = tc->pool_dev->bdev;
+		from.bdev = origin->bdev;
 		from.sector = data_origin * pool->sectors_per_block;
 		from.count = pool->sectors_per_block;
 
@@ -982,6 +999,22 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	}
 }
 
+static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
+				   dm_block_t data_origin, dm_block_t data_dest,
+				   struct cell *cell, struct bio *bio)
+{
+	schedule_copy(tc, virt_block, tc->pool_dev,
+		      data_origin, data_dest, cell, bio);
+}
+
+static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
+				   dm_block_t data_dest,
+				   struct cell *cell, struct bio *bio)
+{
+	schedule_copy(tc, virt_block, tc->origin_dev,
+		      virt_block, data_dest, cell, bio);
+}
+
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 			  dm_block_t data_block, struct cell *cell,
 			  struct bio *bio)
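schedule_internal_copy() and schedule_external_copy() differ only in the source they pass to schedule_copy(): an internal copy reads another pool data block, whereas an external copy reads the origin device at the very virtual block being provisioned. Given the from-region setup shown in the previous hunk (the to-region is elided there, so its exact form below is an assumption based on symmetry), the external case hands dm-kcopyd regions shaped roughly like this:

/* Sketch of the regions an external copy hands to dm-kcopyd. */
struct dm_io_region from = {
	.bdev   = tc->origin_dev->bdev,			/* origin is the source */
	.sector = virt_block * pool->sectors_per_block,	/* same virtual offset */
	.count  = pool->sectors_per_block,
};
struct dm_io_region to = {
	.bdev   = tc->pool_dev->bdev,			/* freshly allocated block */
	.sector = data_dest * pool->sectors_per_block,
	.count  = pool->sectors_per_block,
};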
@@ -1128,8 +1161,8 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 	r = alloc_data_block(tc, &data_block);
 	switch (r) {
 	case 0:
-		schedule_copy(tc, block, lookup_result->block,
-			      data_block, cell, bio);
+		schedule_internal_copy(tc, block, lookup_result->block,
+				       data_block, cell, bio);
 		break;
 
 	case -ENOSPC:
@@ -1203,7 +1236,10 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	r = alloc_data_block(tc, &data_block);
 	switch (r) {
 	case 0:
-		if (tc->origin_dev)
-			schedule_external_copy(tc, block, data_block, cell, bio);
-		else
-			schedule_zero(tc, block, data_block, cell, bio);
 		break;
 
 	case -ENOSPC:
@@ -1254,7 +1290,11 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 		break;
 
 	case -ENODATA:
-		provision_block(tc, bio, block, cell);
+		if (bio_data_dir(bio) == READ && tc->origin_dev) {
+			cell_release_singleton(cell, bio);
+			remap_to_origin_and_issue(tc, bio);
+		} else
+			provision_block(tc, bio, block, cell);
 		break;
 
 	default:
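Together with the provision_block() change above, this -ENODATA arm gives the external-origin semantics: a read of an unprovisioned block is remapped straight to the origin with no allocation, while a write (or any bio when no origin is configured) still provisions a pool block, populated by copying from the origin rather than zeroing when tc->origin_dev is set. A compact restatement using only calls from this patch; handle_unprovisioned() itself is hypothetical, the real logic lives inline in process_bio():

/*
 * Illustration only: how an unprovisioned (-ENODATA) bio is routed
 * after this patch.
 */
static void handle_unprovisioned(struct thin_c *tc, struct bio *bio,
				 dm_block_t block, struct cell *cell)
{
	if (bio_data_dir(bio) == READ && tc->origin_dev) {
		/* Serve the read from the external origin; nothing is allocated. */
		cell_release_singleton(cell, bio);
		remap_to_origin_and_issue(tc, bio);
	} else {
		/*
		 * provision_block() allocates a pool block and fills it:
		 * schedule_external_copy() when an origin exists,
		 * schedule_zero() otherwise.
		 */
		provision_block(tc, bio, block, cell);
	}
}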
@@ -2237,6 +2277,8 @@ static void thin_dtr(struct dm_target *ti)
 	__pool_dec(tc->pool);
 	dm_pool_close_thin_device(tc->td);
 	dm_put_device(ti, tc->pool_dev);
+	if (tc->origin_dev)
+		dm_put_device(ti, tc->origin_dev);
 	kfree(tc);
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
@@ -2245,21 +2287,22 @@ static void thin_dtr(struct dm_target *ti)
 /*
  * Thin target parameters:
  *
- * <pool_dev> <dev_id>
+ * <pool_dev> <dev_id> [origin_dev]
  *
  * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
  * dev_id: the internal device identifier
+ * origin_dev: a device external to the pool that should act as the origin
  */
 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
 	int r;
 	struct thin_c *tc;
-	struct dm_dev *pool_dev;
+	struct dm_dev *pool_dev, *origin_dev;
 	struct mapped_device *pool_md;
 
 	mutex_lock(&dm_thin_pool_table.mutex);
 
-	if (argc != 2) {
+	if (argc != 2 && argc != 3) {
 		ti->error = "Invalid argument count";
 		r = -EINVAL;
 		goto out_unlock;
@@ -2272,6 +2315,15 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto out_unlock;
 	}
 
+	if (argc == 3) {
+		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
+		if (r) {
+			ti->error = "Error opening origin device";
+			goto bad_origin_dev;
+		}
+		tc->origin_dev = origin_dev;
+	}
+
 	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
 	if (r) {
 		ti->error = "Error opening pool device";
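Two details of the constructor are worth noting: tc is allocated with kzalloc() earlier in thin_ctr() (not shown in this hunk), so tc->origin_dev stays NULL for the two-argument form and every origin-specific path is skipped; and the origin is opened FMODE_READ only, matching its role as a never-written external origin. A hypothetical table line for the three-argument form, with all numbers and paths invented for illustration:

/*
 * Hypothetical dm table line (start length target pool_dev dev_id origin_dev):
 *
 *   0 2097152 thin /dev/mapper/pool 1 /dev/vg/origin
 *
 * argv[0] = pool_dev, argv[1] = dev_id, argv[2] = optional origin_dev.
 */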
@@ -2324,6 +2376,9 @@ bad_pool_lookup:
 bad_common:
 	dm_put_device(ti, tc->pool_dev);
 bad_pool_dev:
+	if (tc->origin_dev)
+		dm_put_device(ti, tc->origin_dev);
+bad_origin_dev:
 	kfree(tc);
 out_unlock:
 	mutex_unlock(&dm_thin_pool_table.mutex);
@@ -2382,6 +2437,8 @@ static int thin_status(struct dm_target *ti, status_type_t type,
 		DMEMIT("%s %lu",
 		       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
 		       (unsigned long) tc->dev_id);
+		if (tc->origin_dev)
+			DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
 		break;
 	}
 }
@@ -2419,7 +2476,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 0, 0},
+	.version = {1, 1, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,