author	Joe Thornber <ejt@redhat.com>	2012-03-28 13:41:28 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2012-03-28 13:41:28 -0400
commit	eb2aa48d4eb7aee63cba201bf47641dad3e92250 (patch)
tree	3e160010319f6c4eb30770d07d6fb089955f5704 /drivers
parent	6efd6e83092cd4a7532270bc843de90bb93f6683 (diff)
dm thin: prepare to support discard
This patch contains the ground work needed for dm-thin to support discard.

- Adds an endio function that replaces shared_read_endio.
- Introduces an explicit 'quiesced' flag into the new_mapping structure.
  Before, this was implicitly indicated by m->list being empty.
- map_info->ptr now remains constant for the duration of a bio's trip
  through the thin target, making it easier to reason about.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
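For orientation before reading the diff, the sketch below condenses the new per-bio hook lifecycle introduced by this patch. It is an excerpt-style summary assembled from the hunks that follow, not a self-contained compilable unit; all identifiers are taken from drivers/md/dm-thin.c as modified here (the condensed comments are added for explanation only).

	/* One hook is allocated per bio in thin_bio_map() and stored in
	 * map_info->ptr, where it stays for the bio's whole trip through
	 * the thin target. */
	struct endio_hook {
		struct thin_c *tc;
		struct deferred_entry *shared_read_entry;
		struct new_mapping *overwrite_mapping;
	};

	static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
	{
		struct endio_hook *h = mempool_alloc(tc->pool->endio_hook_pool, GFP_NOIO);

		h->tc = tc;
		h->shared_read_entry = NULL;	/* set later by process_shared_bio() */
		h->overwrite_mapping = NULL;	/* set later by schedule_copy()/schedule_zero() */

		return h;
	}

	/* thin_endio(), the target's new end_io method, replaces shared_read_endio():
	 * it decrements the shared_read deferred-set entry, marks any now-quiesced
	 * mappings, and frees the hook back to the pool's endio_hook_pool. */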
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-thin.c	| 125
1 file changed, 72 insertions, 53 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7ca2bf2aafaa..188121ca00aa 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -523,7 +523,7 @@ struct pool {
 
 	struct bio_list retry_on_resume_list;
 
-	struct deferred_set ds;	/* FIXME: move to thin_c */
+	struct deferred_set shared_read_ds;
 
 	struct new_mapping *next_mapping;
 	mempool_t *mapping_pool;
@@ -618,6 +618,12 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 
 /*----------------------------------------------------------------*/
 
+struct endio_hook {
+	struct thin_c *tc;
+	struct deferred_entry *shared_read_entry;
+	struct new_mapping *overwrite_mapping;
+};
+
 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 {
 	struct bio *bio;
@@ -628,7 +634,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 	bio_list_init(master);
 
 	while ((bio = bio_list_pop(&bios))) {
-		if (dm_get_mapinfo(bio)->ptr == tc)
+		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		if (h->tc == tc)
 			bio_endio(bio, DM_ENDIO_REQUEUE);
 		else
 			bio_list_add(master, bio);
@@ -716,16 +723,11 @@ static void wake_worker(struct pool *pool)
 /*
  * Bio endio functions.
  */
-struct endio_hook {
-	struct thin_c *tc;
-	bio_end_io_t *saved_bi_end_io;
-	struct deferred_entry *entry;
-};
-
 struct new_mapping {
 	struct list_head list;
 
-	int prepared;
+	unsigned quiesced:1;
+	unsigned prepared:1;
 
 	struct thin_c *tc;
 	dm_block_t virt_block;
@@ -747,7 +749,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
 {
 	struct pool *pool = m->tc->pool;
 
-	if (list_empty(&m->list) && m->prepared) {
+	if (m->quiesced && m->prepared) {
 		list_add(&m->list, &pool->prepared_mappings);
 		wake_worker(pool);
 	}
@@ -770,7 +772,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 static void overwrite_endio(struct bio *bio, int err)
 {
 	unsigned long flags;
-	struct new_mapping *m = dm_get_mapinfo(bio)->ptr;
+	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct new_mapping *m = h->overwrite_mapping;
 	struct pool *pool = m->tc->pool;
 
 	m->err = err;
@@ -781,31 +784,6 @@ static void overwrite_endio(struct bio *bio, int err)
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void shared_read_endio(struct bio *bio, int err)
-{
-	struct list_head mappings;
-	struct new_mapping *m, *tmp;
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
-	unsigned long flags;
-	struct pool *pool = h->tc->pool;
-
-	bio->bi_end_io = h->saved_bi_end_io;
-	bio_endio(bio, err);
-
-	INIT_LIST_HEAD(&mappings);
-	ds_dec(h->entry, &mappings);
-
-	spin_lock_irqsave(&pool->lock, flags);
-	list_for_each_entry_safe(m, tmp, &mappings, list) {
-		list_del(&m->list);
-		INIT_LIST_HEAD(&m->list);
-		__maybe_add_mapping(m);
-	}
-	spin_unlock_irqrestore(&pool->lock, flags);
-
-	mempool_free(h, pool->endio_hook_pool);
-}
-
 /*----------------------------------------------------------------*/
 
 /*
@@ -957,6 +935,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	struct new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
+	m->quiesced = 0;
 	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
@@ -965,7 +944,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	m->err = 0;
 	m->bio = NULL;
 
-	ds_add_work(&pool->ds, &m->list);
+	if (!ds_add_work(&pool->shared_read_ds, &m->list))
+		m->quiesced = 1;
 
 	/*
 	 * IO to pool_dev remaps to the pool target's data_dev.
@@ -974,9 +954,10 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	 * bio immediately. Otherwise we use kcopyd to clone the data first.
 	 */
 	if (io_overwrites_block(pool, bio)) {
+		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
-		dm_get_mapinfo(bio)->ptr = m;
 		remap_and_issue(tc, bio, data_dest);
 	} else {
 		struct dm_io_region from, to;
@@ -1023,6 +1004,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	struct new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
+	m->quiesced = 1;
 	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
@@ -1040,9 +1022,10 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 		process_prepared_mapping(m);
 
 	else if (io_overwrites_block(pool, bio)) {
+		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
-		dm_get_mapinfo(bio)->ptr = m;
 		remap_and_issue(tc, bio, data_block);
 
 	} else {
@@ -1129,7 +1112,8 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
  */
 static void retry_on_resume(struct bio *bio)
 {
-	struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
+	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct thin_c *tc = h->tc;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
 
@@ -1195,13 +1179,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 	if (bio_data_dir(bio) == WRITE)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
-		struct endio_hook *h;
-		h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
-		h->tc = tc;
-		h->entry = ds_inc(&pool->ds);
-		save_and_set_endio(bio, &h->saved_bi_end_io, shared_read_endio);
-		dm_get_mapinfo(bio)->ptr = h;
+		h->shared_read_entry = ds_inc(&pool->shared_read_ds);
 
 		cell_release_singleton(cell, bio);
 		remap_and_issue(tc, bio, lookup_result->block);
@@ -1325,7 +1305,9 @@ static void process_deferred_bios(struct pool *pool)
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
+		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct thin_c *tc = h->tc;
+
 		/*
 		 * If we've got no free new_mapping structs, and processing
 		 * this bio might require one, we pause until there are some
@@ -1408,6 +1390,18 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
 	wake_worker(pool);
 }
 
+static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+{
+	struct pool *pool = tc->pool;
+	struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+
+	h->tc = tc;
+	h->shared_read_entry = NULL;
+	h->overwrite_mapping = NULL;
+
+	return h;
+}
+
 /*
  * Non-blocking function called from the thin target's map function.
  */
@@ -1420,11 +1414,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
 	struct dm_thin_device *td = tc->td;
 	struct dm_thin_lookup_result result;
 
-	/*
-	 * Save the thin context for easy access from the deferred bio later.
-	 */
-	map_context->ptr = tc;
-
+	map_context->ptr = thin_hook_bio(tc, bio);
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
 		thin_defer_bio(tc, bio);
 		return DM_MAPIO_SUBMITTED;
@@ -1604,7 +1594,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	pool->low_water_triggered = 0;
 	pool->no_free_space = 0;
 	bio_list_init(&pool->retry_on_resume_list);
-	ds_init(&pool->ds);
+	ds_init(&pool->shared_read_ds);
 
 	pool->next_mapping = NULL;
 	pool->mapping_pool =
@@ -2394,6 +2384,34 @@ static int thin_map(struct dm_target *ti, struct bio *bio,
 	return thin_bio_map(ti, bio, map_context);
 }
 
+static int thin_endio(struct dm_target *ti,
+		      struct bio *bio, int err,
+		      union map_info *map_context)
+{
+	unsigned long flags;
+	struct endio_hook *h = map_context->ptr;
+	struct list_head work;
+	struct new_mapping *m, *tmp;
+	struct pool *pool = h->tc->pool;
+
+	if (h->shared_read_entry) {
+		INIT_LIST_HEAD(&work);
+		ds_dec(h->shared_read_entry, &work);
+
+		spin_lock_irqsave(&pool->lock, flags);
+		list_for_each_entry_safe(m, tmp, &work, list) {
+			list_del(&m->list);
+			m->quiesced = 1;
+			__maybe_add_mapping(m);
+		}
+		spin_unlock_irqrestore(&pool->lock, flags);
+	}
+
+	mempool_free(h, pool->endio_hook_pool);
+
+	return 0;
+}
+
 static void thin_postsuspend(struct dm_target *ti)
 {
 	if (dm_noflush_suspending(ti))
@@ -2481,6 +2499,7 @@ static struct target_type thin_target = {
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
 	.map = thin_map,
+	.end_io = thin_endio,
 	.postsuspend = thin_postsuspend,
 	.status = thin_status,
 	.iterate_devices = thin_iterate_devices,