author	Mikulas Patocka <mpatocka@redhat.com>	2012-12-21 15:23:40 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2012-12-21 15:23:40 -0500
commit	59c3d2c6a12ff580b2c19c3925af4f4552639f8a
tree	c36bffdac55cf1bed47abd0cb194c2c93aa1815c /drivers
parent	0045d61b5b7470f7228b35e1ab7139119e249503
dm thin: dont use map_context
This patch removes endio_hook_pool from dm-thin and uses per-bio data instead.

This patch removes any use of map_info in preparation for the next patch
that removes map_info from bio-based device mapper.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
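For context, a minimal sketch of the per-bio-data pattern this patch adopts; it is not part of the patch, and every "example_*" name is hypothetical. The interfaces it exercises (ti->per_bio_data_size, dm_per_bio_data()) are the ones visible in the diff below: the target declares the size in its constructor, the dm core allocates that many bytes alongside each bio, and the same dm_per_bio_data() call recovers them in both the map and endio paths, replacing the private kmem_cache/mempool and map_info->ptr hand-off.

/*
 * Hypothetical bio-based target showing the per-bio-data interface.
 */
#include <linux/device-mapper.h>

struct example_hook {			/* stands in for dm_thin_endio_hook */
	void *context;
};

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/*
	 * Ask the dm core to allocate this many bytes with every bio
	 * submitted to this target; replaces the target's private
	 * kmem_cache + mempool that the patch deletes.
	 */
	ti->per_bio_data_size = sizeof(struct example_hook);
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio,
		       union map_info *map_context)
{
	/*
	 * Fetch the preallocated area; replaces mempool_alloc(GFP_NOIO)
	 * in the map path and map_context->ptr as the hand-off to endio.
	 * No allocation happens here, so this cannot fail or block.
	 */
	struct example_hook *h = dm_per_bio_data(bio, sizeof(struct example_hook));

	h->context = ti->private;
	return DM_MAPIO_REMAPPED;
}

static int example_end_io(struct dm_target *ti, struct bio *bio,
			  int err, union map_info *map_context)
{
	/* The same call recovers the same bytes at completion time. */
	struct example_hook *h = dm_per_bio_data(bio, sizeof(struct example_hook));

	(void)h;
	return 0;	/* nothing to free: the dm core owns the memory */
}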
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-thin.c	49
1 file changed, 13 insertions(+), 36 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4b940745ba9e..e7743c69a24c 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -186,7 +186,6 @@ struct pool {
 
 	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;
-	mempool_t *endio_hook_pool;
 
 	process_bio_fn process_bio;
 	process_bio_fn process_discard;
@@ -304,7 +303,7 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 	bio_list_init(master);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 		if (h->tc == tc)
 			bio_endio(bio, DM_ENDIO_REQUEUE);
@@ -375,7 +374,7 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio)
 	if (bio->bi_rw & REQ_DISCARD)
 		return;
 
-	h = dm_get_mapinfo(bio)->ptr;
+	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
 }
 
@@ -485,7 +484,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 static void overwrite_endio(struct bio *bio, int err)
 {
 	unsigned long flags;
-	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 	struct pool *pool = m->tc->pool;
 
@@ -714,7 +713,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	 * bio immediately. Otherwise we use kcopyd to clone the data first.
 	 */
 	if (io_overwrites_block(pool, bio)) {
-		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 		h->overwrite_mapping = m;
 		m->bio = bio;
@@ -784,7 +783,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 		process_prepared_mapping(m);
 
 	else if (io_overwrites_block(pool, bio)) {
-		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 		h->overwrite_mapping = m;
 		m->bio = bio;
@@ -899,7 +898,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
  */
 static void retry_on_resume(struct bio *bio)
 {
-	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct thin_c *tc = h->tc;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
@@ -1051,7 +1050,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 		if (bio_data_dir(bio) == WRITE && bio->bi_size)
 			break_sharing(tc, bio, block, &key, lookup_result, cell);
 		else {
-			struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 			h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
 			inc_all_io_entry(pool, bio);
@@ -1226,7 +1225,7 @@ static void process_deferred_bios(struct pool *pool)
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 		struct thin_c *tc = h->tc;
 
 		/*
@@ -1359,17 +1358,14 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
 	wake_worker(pool);
 }
 
-static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
-	struct pool *pool = tc->pool;
-	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
 	h->tc = tc;
 	h->shared_read_entry = NULL;
 	h->all_io_entry = NULL;
 	h->overwrite_mapping = NULL;
-
-	return h;
 }
 
 /*
@@ -1386,7 +1382,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
 	struct dm_bio_prison_cell *cell1, *cell2;
 	struct dm_cell_key key;
 
-	map_context->ptr = thin_hook_bio(tc, bio);
+	thin_hook_bio(tc, bio);
 
 	if (get_pool_mode(tc->pool) == PM_FAIL) {
 		bio_io_error(bio);
@@ -1595,14 +1591,12 @@ static void __pool_destroy(struct pool *pool)
 	if (pool->next_mapping)
 		mempool_free(pool->next_mapping, pool->mapping_pool);
 	mempool_destroy(pool->mapping_pool);
-	mempool_destroy(pool->endio_hook_pool);
 	dm_deferred_set_destroy(pool->shared_read_ds);
 	dm_deferred_set_destroy(pool->all_io_ds);
 	kfree(pool);
 }
 
 static struct kmem_cache *_new_mapping_cache;
-static struct kmem_cache *_endio_hook_cache;
 
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
@@ -1696,13 +1690,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 		goto bad_mapping_pool;
 	}
 
-	pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
-							 _endio_hook_cache);
-	if (!pool->endio_hook_pool) {
-		*error = "Error creating pool's endio_hook mempool";
-		err_p = ERR_PTR(-ENOMEM);
-		goto bad_endio_hook_pool;
-	}
 	pool->ref_count = 1;
 	pool->last_commit_jiffies = jiffies;
 	pool->pool_md = pool_md;
@@ -1711,8 +1698,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
 	return pool;
 
-bad_endio_hook_pool:
-	mempool_destroy(pool->mapping_pool);
 bad_mapping_pool:
 	dm_deferred_set_destroy(pool->all_io_ds);
 bad_all_io_ds:
@@ -2607,6 +2592,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	ti->num_flush_requests = 1;
 	ti->flush_supported = true;
+	ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
 
 	/* In case the pool supports discards, pass them on. */
 	if (tc->pool->pf.discard_enabled) {
@@ -2653,7 +2639,7 @@ static int thin_endio(struct dm_target *ti,
 			union map_info *map_context)
 {
 	unsigned long flags;
-	struct dm_thin_endio_hook *h = map_context->ptr;
+	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct list_head work;
 	struct dm_thin_new_mapping *m, *tmp;
 	struct pool *pool = h->tc->pool;
@@ -2683,8 +2669,6 @@ static int thin_endio(struct dm_target *ti,
 		}
 	}
 
-	mempool_free(h, pool->endio_hook_pool);
-
 	return 0;
 }
 
@@ -2813,14 +2797,8 @@ static int __init dm_thin_init(void)
 	if (!_new_mapping_cache)
 		goto bad_new_mapping_cache;
 
-	_endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
-	if (!_endio_hook_cache)
-		goto bad_endio_hook_cache;
-
 	return 0;
 
-bad_endio_hook_cache:
-	kmem_cache_destroy(_new_mapping_cache);
 bad_new_mapping_cache:
 	dm_unregister_target(&pool_target);
 bad_pool_target:
@@ -2835,7 +2813,6 @@ static void dm_thin_exit(void)
 	dm_unregister_target(&pool_target);
 
 	kmem_cache_destroy(_new_mapping_cache);
-	kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);