author		Jun'ichi Nomura <j-nomura@ce.jp.nec.com>	2013-03-01 17:45:48 -0500
committer	Alasdair G Kergon <agk@redhat.com>		2013-03-01 17:45:48 -0500
commit		5f01520415e82f8e354807484ef842335070a3bd (patch)
tree		0100b4309d01d22bd3726b20d76b5cd56c64df5e
parent		23e5083b4d47e778bf7983329989dab7543def14 (diff)
dm: merge io_pool and tio_pool
This patch merges io_pool and tio_pool into io_pool and cleans up
related functions.
Though device-mapper used to have 2 pools of objects for each dm device,
the use of bioset front_pad for per-bio data has shrunk the number of
pools to 1 for both bio-based and request-based device types.
(See c0820cf5 "dm: introduce per_bio_data" and
94818742 "dm: Use bioset's front_pad for dm_rq_clone_bio_info")
So dm no longer has to maintain 2 different pointers.
No functional changes.
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
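
For context, the front_pad mechanism referenced above asks the bioset to allocate extra bytes immediately in front of each struct bio, so the dm_target_io (and the target's per-bio data) lives in the same allocation as the cloned bio and can be recovered by pointer arithmetic alone. The following standalone C sketch models that layout with simplified stand-in structs; it is an illustration of the idea and mirrors the arithmetic behind dm_per_bio_data(), not the kernel's actual definitions:

/*
 * Illustration only: a userspace model of the bioset front_pad idea.
 * The struct layouts are simplified stand-ins, not the real kernel ones.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct bio { int dummy; };		/* stand-in for struct bio */

struct dm_target_io {			/* simplified stand-in */
	void *io;
	void *ti;
	unsigned target_bio_nr;
	struct bio clone;		/* the cloned bio is embedded here */
};

/* Mirrors the arithmetic of dm_per_bio_data(): step back from the bio. */
static void *per_bio_data(struct bio *bio, size_t data_size)
{
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

int main(void)
{
	size_t data_size = 64;	/* a target's per_bio_data_size, rounded up */
	/* front_pad ~ data_size + offsetof(struct dm_target_io, clone) */
	char *buf = calloc(1, data_size + sizeof(struct dm_target_io));
	struct dm_target_io *tio;
	struct bio *bio;

	if (!buf)
		return 1;
	tio = (struct dm_target_io *)(buf + data_size);
	bio = &tio->clone;

	/* The per-bio data region is recovered from the bio pointer alone. */
	printf("per-bio data at %p, buffer starts at %p\n",
	       per_bio_data(bio, data_size), (void *)buf);
	free(buf);
	return 0;
}

Because both the bio-based dm_target_io and the request-based dm_rq_clone_bio_info ride in the bioset's front_pad this way, the only remaining mempool per device holds struct dm_io (bio-based) or struct dm_rq_target_io (request-based), which is what lets io_pool and tio_pool collapse into a single pointer below.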
-rw-r--r--	drivers/md/dm.c	76
1 file changed, 27 insertions, 49 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 031f1f1c711b..e417cf0a69ef 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -163,7 +163,6 @@ struct mapped_device {
 	 * io objects are allocated from here.
 	 */
 	mempool_t *io_pool;
-	mempool_t *tio_pool;
 
 	struct bio_set *bs;
 
@@ -197,7 +196,6 @@ struct mapped_device {
  */
 struct dm_md_mempools {
 	mempool_t *io_pool;
-	mempool_t *tio_pool;
 	struct bio_set *bs;
 };
 
@@ -435,12 +433,12 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
 					    gfp_t gfp_mask)
 {
-	return mempool_alloc(md->tio_pool, gfp_mask);
+	return mempool_alloc(md->io_pool, gfp_mask);
 }
 
 static void free_rq_tio(struct dm_rq_target_io *tio)
 {
-	mempool_free(tio, tio->md->tio_pool);
+	mempool_free(tio, tio->md->io_pool);
 }
 
 static int md_in_flight(struct mapped_device *md)
@@ -1949,8 +1947,6 @@ static void free_dev(struct mapped_device *md)
 	unlock_fs(md);
 	bdput(md->bdev);
 	destroy_workqueue(md->wq);
-	if (md->tio_pool)
-		mempool_destroy(md->tio_pool);
 	if (md->io_pool)
 		mempool_destroy(md->io_pool);
 	if (md->bs)
@@ -1973,7 +1969,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-	if (md->bs) {
+	if (md->io_pool && md->bs) {
 		/* The md already has necessary mempools. */
 		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
 			/*
@@ -1984,7 +1980,6 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 			md->bs = p->bs;
 			p->bs = NULL;
 		} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
-			BUG_ON(!md->tio_pool);
 			/*
 			 * There's no need to reload with request-based dm
 			 * because the size of front_pad doesn't change.
@@ -1997,12 +1992,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 		goto out;
 	}
 
-	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+	BUG_ON(!p || md->io_pool || md->bs);
 
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
-	md->tio_pool = p->tio_pool;
-	p->tio_pool = NULL;
 	md->bs = p->bs;
 	p->bs = NULL;
 
@@ -2759,54 +2752,42 @@ EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
 {
-	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
-	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
+	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+	struct kmem_cache *cachep;
+	unsigned int pool_size;
+	unsigned int front_pad;
 
 	if (!pools)
 		return NULL;
 
-	per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
-
-	pools->io_pool = NULL;
 	if (type == DM_TYPE_BIO_BASED) {
-		pools->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
-		if (!pools->io_pool)
-			goto free_pools_and_out;
-	}
+		cachep = _io_cache;
+		pool_size = 16;
+		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+	} else if (type == DM_TYPE_REQUEST_BASED) {
+		cachep = _rq_tio_cache;
+		pool_size = MIN_IOS;
+		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+		/* per_bio_data_size is not used. See __bind_mempools(). */
+		WARN_ON(per_bio_data_size != 0);
+	} else
+		goto out;
 
-	pools->tio_pool = NULL;
-	if (type == DM_TYPE_REQUEST_BASED) {
-		pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
-		if (!pools->tio_pool)
-			goto free_io_pool_and_out;
-	}
+	pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
+	if (!pools->io_pool)
+		goto out;
 
-	pools->bs = (type == DM_TYPE_BIO_BASED) ?
-		    bioset_create(pool_size,
-				  per_bio_data_size + offsetof(struct dm_target_io, clone)) :
-		    bioset_create(pool_size,
-				  offsetof(struct dm_rq_clone_bio_info, clone));
+	pools->bs = bioset_create(pool_size, front_pad);
 	if (!pools->bs)
-		goto free_tio_pool_and_out;
+		goto out;
 
 	if (integrity && bioset_integrity_create(pools->bs, pool_size))
-		goto free_bioset_and_out;
+		goto out;
 
 	return pools;
 
-free_bioset_and_out:
-	bioset_free(pools->bs);
-
-free_tio_pool_and_out:
-	if (pools->tio_pool)
-		mempool_destroy(pools->tio_pool);
-
-free_io_pool_and_out:
-	if (pools->io_pool)
-		mempool_destroy(pools->io_pool);
-
-free_pools_and_out:
-	kfree(pools);
+out:
+	dm_free_md_mempools(pools);
 
 	return NULL;
 }
@@ -2819,9 +2800,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
 	if (pools->io_pool)
 		mempool_destroy(pools->io_pool);
 
-	if (pools->tio_pool)
-		mempool_destroy(pools->tio_pool);
-
 	if (pools->bs)
 		bioset_free(pools->bs);
 
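
A side effect of switching dm_alloc_md_mempools() to kzalloc() is that the old chain of error labels (free_bioset_and_out, free_tio_pool_and_out, ...) collapses into a single out: label: dm_free_md_mempools() checks each member before freeing it, so it is safe to call on a partially constructed structure whose untouched pointers are still NULL. A minimal userspace sketch of that pattern, with hypothetical names rather than the kernel API:

/* Hypothetical stand-ins for the single-cleanup-label pattern. */
#include <stdlib.h>

struct pools {
	void *io_pool;
	void *bs;
};

/* Mirrors dm_free_md_mempools(): only frees members that were set up. */
static void pools_free(struct pools *p)
{
	if (!p)
		return;
	if (p->io_pool)
		free(p->io_pool);
	if (p->bs)
		free(p->bs);
	free(p);
}

static struct pools *pools_alloc(void)
{
	/* Zero-initialising the struct is what makes one error label safe. */
	struct pools *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;

	p->io_pool = malloc(128);
	if (!p->io_pool)
		goto out;

	p->bs = malloc(256);
	if (!p->bs)
		goto out;

	return p;
out:
	pools_free(p);		/* members never allocated are still NULL */
	return NULL;
}

int main(void)
{
	struct pools *p = pools_alloc();

	pools_free(p);
	return 0;
}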