author		Joe Thornber <ejt@redhat.com>	2012-03-28 13:41:28 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2012-03-28 13:41:28 -0400
commit		104655fd4dcebd50068ef30253a001da72e3a081 (patch)
tree		ce4cc70f3ecf643d1c63948f902bc135b17750c7 /drivers/md/dm-thin.c
parent		eb2aa48d4eb7aee63cba201bf47641dad3e92250 (diff)
dm thin: support discards
Support discards in the thin target.

On discard the corresponding mapping(s) are removed from the thin
device.  If the associated block(s) are no longer shared, the discard
is passed to the underlying device.

All bios other than discards now have an associated deferred_entry
that is saved to the 'all_io_entry' in endio_hook.  When non-discard
IO completes and the associated mappings are quiesced, any discards
that were deferred, via ds_add_work() in process_discard(), will be
queued for processing by the worker thread.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--	drivers/md/dm-thin.c	172
1 file changed, 158 insertions, 14 deletions
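To make the quiescing scheme described in the commit message easier to follow, below is a minimal, single-threaded userspace sketch of the idea only. It collapses the kernel's deferred set into a single in-flight counter plus one waiting list; the names (struct work, ds_inc, ds_add_work, ds_dec) merely mirror the kernel helpers in spirit, and their signatures and behaviour here are simplified assumptions, not the dm-thin implementation shown in the patch below. A second worked example, covering the bio clamping done in process_discard()'s unaligned path, follows the patch.

#include <stdio.h>
#include <stddef.h>

/* One piece of deferred work, e.g. a discard waiting for quiescing. */
struct work {
	int id;
	struct work *next;
};

/*
 * Collapsed model of the deferred set: a count of IOs still in flight and
 * a list of work items that may only be processed once that count is zero.
 */
struct deferred_set {
	unsigned in_flight;
	struct work *waiting;
};

/* Called when a non-discard bio is hooked: pin the set. */
static void ds_inc(struct deferred_set *ds)
{
	ds->in_flight++;
}

/*
 * Try to add work.  Returns 0 if nothing is in flight, so the caller may
 * process the work immediately; returns 1 if the work was queued.
 */
static int ds_add_work(struct deferred_set *ds, struct work *w)
{
	if (!ds->in_flight)
		return 0;
	w->next = ds->waiting;
	ds->waiting = w;
	return 1;
}

/*
 * Called from the endio path: drop one in-flight IO and, if it was the
 * last one, hand every waiting work item back to the caller.
 */
static struct work *ds_dec(struct deferred_set *ds)
{
	struct work *ready = NULL;

	if (ds->in_flight && !--ds->in_flight) {
		ready = ds->waiting;
		ds->waiting = NULL;
	}
	return ready;
}

int main(void)
{
	struct deferred_set ds = { 0, NULL };
	struct work discard = { 42, NULL };
	struct work *w;

	ds_inc(&ds);				/* a write is in flight */
	if (!ds_add_work(&ds, &discard))	/* discard arrives: must wait */
		printf("discard %d processed immediately\n", discard.id);

	for (w = ds_dec(&ds); w; w = w->next)	/* write completes: quiesced */
		printf("discard %d queued for the worker\n", w->id);

	return 0;
}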
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 188121ca00aa..703bbbc4f16f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -520,10 +520,12 @@ struct pool {
 	struct bio_list deferred_bios;
 	struct bio_list deferred_flush_bios;
 	struct list_head prepared_mappings;
+	struct list_head prepared_discards;
 
 	struct bio_list retry_on_resume_list;
 
 	struct deferred_set shared_read_ds;
+	struct deferred_set all_io_ds;
 
 	struct new_mapping *next_mapping;
 	mempool_t *mapping_pool;
@@ -621,6 +623,7 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 struct endio_hook {
 	struct thin_c *tc;
 	struct deferred_entry *shared_read_entry;
+	struct deferred_entry *all_io_entry;
 	struct new_mapping *overwrite_mapping;
 };
 
@@ -728,11 +731,12 @@ struct new_mapping {
 
 	unsigned quiesced:1;
 	unsigned prepared:1;
+	unsigned pass_discard:1;
 
 	struct thin_c *tc;
 	dm_block_t virt_block;
 	dm_block_t data_block;
-	struct cell *cell;
+	struct cell *cell, *cell2;
 	int err;
 
 	/*
@@ -872,7 +876,30 @@ static void process_prepared_mapping(struct new_mapping *m)
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
-static void process_prepared_mappings(struct pool *pool)
+static void process_prepared_discard(struct new_mapping *m)
+{
+	int r;
+	struct thin_c *tc = m->tc;
+
+	r = dm_thin_remove_block(tc->td, m->virt_block);
+	if (r)
+		DMERR("dm_thin_remove_block() failed");
+
+	/*
+	 * Pass the discard down to the underlying device?
+	 */
+	if (m->pass_discard)
+		remap_and_issue(tc, m->bio, m->data_block);
+	else
+		bio_endio(m->bio, 0);
+
+	cell_defer_except(tc, m->cell);
+	cell_defer_except(tc, m->cell2);
+	mempool_free(m, tc->pool->mapping_pool);
+}
+
+static void process_prepared(struct pool *pool, struct list_head *head,
+			     void (*fn)(struct new_mapping *))
 {
 	unsigned long flags;
 	struct list_head maps;
@@ -880,21 +907,27 @@ static void process_prepared_mappings(struct pool *pool)
 
 	INIT_LIST_HEAD(&maps);
 	spin_lock_irqsave(&pool->lock, flags);
-	list_splice_init(&pool->prepared_mappings, &maps);
+	list_splice_init(head, &maps);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	list_for_each_entry_safe(m, tmp, &maps, list)
-		process_prepared_mapping(m);
+		fn(m);
 }
 
 /*
  * Deferred bio jobs.
  */
-static int io_overwrites_block(struct pool *pool, struct bio *bio)
+static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-	return ((bio_data_dir(bio) == WRITE) &&
-		!(bio->bi_sector & pool->offset_mask)) &&
+	return !(bio->bi_sector & pool->offset_mask) &&
 	       (bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
+
+}
+
+static int io_overwrites_block(struct pool *pool, struct bio *bio)
+{
+	return (bio_data_dir(bio) == WRITE) &&
+		io_overlaps_block(pool, bio);
 }
 
 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
@@ -1134,6 +1167,86 @@ static void no_space(struct cell *cell)
 	retry_on_resume(bio);
 }
 
+static void process_discard(struct thin_c *tc, struct bio *bio)
+{
+	int r;
+	struct pool *pool = tc->pool;
+	struct cell *cell, *cell2;
+	struct cell_key key, key2;
+	dm_block_t block = get_bio_block(tc, bio);
+	struct dm_thin_lookup_result lookup_result;
+	struct new_mapping *m;
+
+	build_virtual_key(tc->td, block, &key);
+	if (bio_detain(tc->pool->prison, &key, bio, &cell))
+		return;
+
+	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
+	switch (r) {
+	case 0:
+		/*
+		 * Check nobody is fiddling with this pool block. This can
+		 * happen if someone's in the process of breaking sharing
+		 * on this block.
+		 */
+		build_data_key(tc->td, lookup_result.block, &key2);
+		if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+			cell_release_singleton(cell, bio);
+			break;
+		}
+
+		if (io_overlaps_block(pool, bio)) {
+			/*
+			 * IO may still be going to the destination block. We must
+			 * quiesce before we can do the removal.
+			 */
+			m = get_next_mapping(pool);
+			m->tc = tc;
+			m->pass_discard = !lookup_result.shared;
+			m->virt_block = block;
+			m->data_block = lookup_result.block;
+			m->cell = cell;
+			m->cell2 = cell2;
+			m->err = 0;
+			m->bio = bio;
+
+			if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+				list_add(&m->list, &pool->prepared_discards);
+				wake_worker(pool);
+			}
+		} else {
+			/*
+			 * This path is hit if people are ignoring
+			 * limits->discard_granularity. It ignores any
+			 * part of the discard that is in a subsequent
+			 * block.
+			 */
+			sector_t offset = bio->bi_sector - (block << pool->block_shift);
+			unsigned remaining = (pool->sectors_per_block - offset) << 9;
+			bio->bi_size = min(bio->bi_size, remaining);
+
+			cell_release_singleton(cell, bio);
+			cell_release_singleton(cell2, bio);
+			remap_and_issue(tc, bio, lookup_result.block);
+		}
+		break;
+
+	case -ENODATA:
+		/*
+		 * It isn't provisioned, just forget it.
+		 */
+		cell_release_singleton(cell, bio);
+		bio_endio(bio, 0);
+		break;
+
+	default:
+		DMERR("discard: find block unexpectedly returned %d", r);
+		cell_release_singleton(cell, bio);
+		bio_io_error(bio);
+		break;
+	}
+}
+
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 			  struct cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result,
@@ -1279,6 +1392,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
 	default:
 		DMERR("dm_thin_find_block() failed, error = %d", r);
+		cell_release_singleton(cell, bio);
 		bio_io_error(bio);
 		break;
 	}
@@ -1320,7 +1434,11 @@ static void process_deferred_bios(struct pool *pool)
 
 			break;
 		}
-		process_bio(tc, bio);
+
+		if (bio->bi_rw & REQ_DISCARD)
+			process_discard(tc, bio);
+		else
+			process_bio(tc, bio);
 	}
 
 	/*
@@ -1354,7 +1472,8 @@ static void do_worker(struct work_struct *ws)
 {
 	struct pool *pool = container_of(ws, struct pool, worker);
 
-	process_prepared_mappings(pool);
+	process_prepared(pool, &pool->prepared_mappings, process_prepared_mapping);
+	process_prepared(pool, &pool->prepared_discards, process_prepared_discard);
 	process_deferred_bios(pool);
 }
 
@@ -1397,6 +1516,7 @@ static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
 
 	h->tc = tc;
 	h->shared_read_entry = NULL;
+	h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
 	h->overwrite_mapping = NULL;
 
 	return h;
@@ -1415,7 +1535,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
 	struct dm_thin_lookup_result result;
 
 	map_context->ptr = thin_hook_bio(tc, bio);
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
 		thin_defer_bio(tc, bio);
 		return DM_MAPIO_SUBMITTED;
 	}
@@ -1591,10 +1711,12 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	bio_list_init(&pool->deferred_bios);
 	bio_list_init(&pool->deferred_flush_bios);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
+	INIT_LIST_HEAD(&pool->prepared_discards);
 	pool->low_water_triggered = 0;
 	pool->no_free_space = 0;
 	bio_list_init(&pool->retry_on_resume_list);
 	ds_init(&pool->shared_read_ds);
+	ds_init(&pool->all_io_ds);
 
 	pool->next_mapping = NULL;
 	pool->mapping_pool =
@@ -1834,7 +1956,8 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	pt->low_water_blocks = low_water_blocks;
 	pt->zero_new_blocks = pf.zero_new_blocks;
 	ti->num_flush_requests = 1;
-	ti->num_discard_requests = 0;
+	ti->num_discard_requests = 1;
+	ti->discards_supported = 1;
 	ti->private = pt;
 
 	pt->callbacks.congested_fn = pool_is_congested;
@@ -2227,6 +2350,17 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
 
+static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
+{
+	limits->max_discard_sectors = pool->sectors_per_block;
+
+	/*
+	 * This is just a hint, and not enforced. We have to cope with
+	 * bios that overlap 2 blocks.
+	 */
+	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+}
+
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct pool_c *pt = ti->private;
@@ -2234,6 +2368,7 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 	blk_limits_io_min(limits, 0);
 	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+	set_discard_limits(pool, limits);
 }
 
 static struct target_type pool_target = {
@@ -2350,8 +2485,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	ti->split_io = tc->pool->sectors_per_block;
 	ti->num_flush_requests = 1;
-	ti->num_discard_requests = 0;
-	ti->discards_supported = 0;
+	ti->num_discard_requests = 1;
+	ti->discards_supported = 1;
 
 	dm_put(pool_md);
 
@@ -2407,6 +2542,13 @@ static int thin_endio(struct dm_target *ti,
 		spin_unlock_irqrestore(&pool->lock, flags);
 	}
 
+	if (h->all_io_entry) {
+		INIT_LIST_HEAD(&work);
+		ds_dec(h->all_io_entry, &work);
+		list_for_each_entry_safe(m, tmp, &work, list)
+			list_add(&m->list, &pool->prepared_discards);
+	}
+
 	mempool_free(h, pool->endio_hook_pool);
 
 	return 0;
@@ -2487,9 +2629,11 @@ static int thin_iterate_devices(struct dm_target *ti,
 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct thin_c *tc = ti->private;
+	struct pool *pool = tc->pool;
 
 	blk_limits_io_min(limits, 0);
-	blk_limits_io_opt(limits, tc->pool->sectors_per_block << SECTOR_SHIFT);
+	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+	set_discard_limits(pool, limits);
 }
 
 static struct target_type thin_target = {
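As mentioned before the patch, the unaligned path in process_discard() clamps a discard so that only the portion falling inside the current virtual block is issued. The standalone userspace sketch below reproduces that arithmetic with made-up example numbers; the pool geometry and the bio fields are hypothetical stand-ins, not kernel API.

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	/* Hypothetical pool geometry: 128 sectors (64 KiB) per block. */
	unsigned sectors_per_block = 128;
	unsigned block_shift = 7;			/* log2(128) */

	/* A discard starting 100 sectors into virtual block 5, 192 sectors long. */
	sector_t bi_sector = (5ULL << block_shift) + 100;
	unsigned bi_size = 192 << 9;			/* bio size in bytes */

	sector_t block = bi_sector >> block_shift;
	sector_t offset = bi_sector - (block << block_shift);
	unsigned remaining = (sectors_per_block - offset) << 9;

	/* Keep only the part of the discard that lies inside this block. */
	if (bi_size > remaining)
		bi_size = remaining;

	printf("block %llu, offset %llu sectors, trimmed to %u bytes (%u sectors)\n",
	       block, offset, bi_size, bi_size >> 9);
	return 0;
}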