author     Mike Snitzer <snitzer@redhat.com>    2014-12-05 17:11:05 -0500
committer  Mike Snitzer <snitzer@redhat.com>    2015-02-09 13:06:47 -0500
commit     1ae49ea2cf3ef097d4496981261a400f1f988b84 (patch)
tree       816a2c908e575cc31c55af8a1cfec5dcee65e5e9 /drivers/md
parent     dbf9782c1078c537831201c73ac60c9623ae9370 (diff)
dm: split request structure out from dm_rq_target_io structure
Request-based DM support for blk-mq devices requires that
dm_rq_target_io structures not be allocated with an embedded request
structure. The request-based DM target (e.g. dm-multipath) must
allocate the request from the blk-mq device's request_queue using
blk_get_request().
The unfortunate side-effect of this change is that old-style request-based
DM support will no longer use contiguous memory for the dm_rq_target_io
and request structures for each clone.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
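
For context, a minimal sketch of the two allocation strategies this split enables. This is not code from the patch: the sketch_* helper names are hypothetical and error handling is abbreviated; only mempool_alloc(), blk_rq_init(), blk_get_request(), and IS_ERR_OR_NULL() are existing kernel APIs.

/* Old-style request_fn path: the clone comes from DM's own mempool
 * (md->rq_pool below), and DM must initialize it itself. */
static struct request *sketch_clone_from_pool(struct mapped_device *md,
					      gfp_t gfp_mask)
{
	struct request *clone = mempool_alloc(md->rq_pool, gfp_mask);

	if (clone)
		blk_rq_init(NULL, clone);
	return clone;
}

/* blk-mq path: the clone must be a real request owned by the underlying
 * device's request_queue, so it cannot be embedded in the tio. */
static struct request *sketch_clone_from_queue(struct request_queue *q,
					       struct request *rq)
{
	struct request *clone = blk_get_request(q, rq_data_dir(rq),
						GFP_ATOMIC);

	return IS_ERR_OR_NULL(clone) ? NULL : clone;
}

An embedded struct request could only ever satisfy the first strategy, which is why tio->clone becomes a pointer in this patch.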
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm.c | 70
1 file changed, 61 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5920a9af7bd6..9a857e337902 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -78,7 +78,7 @@ struct dm_io {
 struct dm_rq_target_io {
 	struct mapped_device *md;
 	struct dm_target *ti;
-	struct request *orig, clone;
+	struct request *orig, *clone;
 	int error;
 	union map_info info;
 };
@@ -179,6 +179,7 @@ struct mapped_device {
 	 * io objects are allocated from here.
 	 */
 	mempool_t *io_pool;
+	mempool_t *rq_pool;
 
 	struct bio_set *bs;
 
@@ -214,6 +215,7 @@ struct mapped_device {
  */
 struct dm_md_mempools {
 	mempool_t *io_pool;
+	mempool_t *rq_pool;
 	struct bio_set *bs;
 };
 
@@ -228,6 +230,7 @@ struct table_device {
 #define RESERVED_MAX_IOS	1024
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_rq_tio_cache;
+static struct kmem_cache *_rq_cache;
 
 /*
  * Bio-based DM's mempools' reserved IOs set by the user.
@@ -285,9 +288,14 @@ static int __init local_init(void)
 	if (!_rq_tio_cache)
 		goto out_free_io_cache;
 
+	_rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
+				      __alignof__(struct request), 0, NULL);
+	if (!_rq_cache)
+		goto out_free_rq_tio_cache;
+
 	r = dm_uevent_init();
 	if (r)
-		goto out_free_rq_tio_cache;
+		goto out_free_rq_cache;
 
 	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
 	if (!deferred_remove_workqueue) {
@@ -309,6 +317,8 @@ out_free_workqueue:
 	destroy_workqueue(deferred_remove_workqueue);
 out_uevent_exit:
 	dm_uevent_exit();
+out_free_rq_cache:
+	kmem_cache_destroy(_rq_cache);
 out_free_rq_tio_cache:
 	kmem_cache_destroy(_rq_tio_cache);
 out_free_io_cache:
@@ -322,6 +332,7 @@ static void local_exit(void)
 	flush_scheduled_work();
 	destroy_workqueue(deferred_remove_workqueue);
 
+	kmem_cache_destroy(_rq_cache);
 	kmem_cache_destroy(_rq_tio_cache);
 	kmem_cache_destroy(_io_cache);
 	unregister_blkdev(_major, _name);
@@ -574,6 +585,17 @@ static void free_rq_tio(struct dm_rq_target_io *tio)
 	mempool_free(tio, tio->md->io_pool);
 }
 
+static struct request *alloc_clone_request(struct mapped_device *md,
+					   gfp_t gfp_mask)
+{
+	return mempool_alloc(md->rq_pool, gfp_mask);
+}
+
+static void free_clone_request(struct mapped_device *md, struct request *rq)
+{
+	mempool_free(rq, md->rq_pool);
+}
+
 static int md_in_flight(struct mapped_device *md)
 {
 	return atomic_read(&md->pending[READ]) +
@@ -1017,6 +1039,7 @@ static void free_rq_clone(struct request *clone)
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
 	blk_rq_unprep_clone(clone);
+	free_clone_request(tio->md, clone);
 	free_rq_tio(tio);
 }
 
@@ -1712,12 +1735,11 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
 }
 
 static int setup_clone(struct request *clone, struct request *rq,
-		       struct dm_rq_target_io *tio)
+		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
 	int r;
 
-	blk_rq_init(NULL, clone);
-	r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
+	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
 			      dm_rq_bio_constructor, tio);
 	if (r)
 		return r;
@@ -1728,9 +1750,29 @@ static int setup_clone(struct request *clone, struct request *rq,
 	clone->end_io = end_clone_request;
 	clone->end_io_data = tio;
 
+	tio->clone = clone;
+
 	return 0;
 }
 
+static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
+				  struct dm_rq_target_io *tio, gfp_t gfp_mask)
+{
+	struct request *clone = alloc_clone_request(md, gfp_mask);
+
+	if (!clone)
+		return NULL;
+
+	blk_rq_init(NULL, clone);
+	if (setup_clone(clone, rq, tio, gfp_mask)) {
+		/* -ENOMEM */
+		free_clone_request(md, clone);
+		return NULL;
+	}
+
+	return clone;
+}
+
 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 				gfp_t gfp_mask)
 {
@@ -1743,13 +1785,13 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 
 	tio->md = md;
 	tio->ti = NULL;
+	tio->clone = NULL;
 	tio->orig = rq;
 	tio->error = 0;
 	memset(&tio->info, 0, sizeof(tio->info));
 
-	clone = &tio->clone;
-	if (setup_clone(clone, rq, tio)) {
-		/* -ENOMEM */
+	clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
+	if (!clone) {
 		free_rq_tio(tio);
 		return NULL;
 	}
@@ -2149,6 +2191,8 @@ static void free_dev(struct mapped_device *md)
 	destroy_workqueue(md->wq);
 	if (md->io_pool)
 		mempool_destroy(md->io_pool);
+	if (md->rq_pool)
+		mempool_destroy(md->rq_pool);
 	if (md->bs)
 		bioset_free(md->bs);
 	blk_integrity_unregister(md->disk);
@@ -2195,10 +2239,12 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 		goto out;
 	}
 
-	BUG_ON(!p || md->io_pool || md->bs);
+	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
 
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
+	md->rq_pool = p->rq_pool;
+	p->rq_pool = NULL;
 	md->bs = p->bs;
 	p->bs = NULL;
 
@@ -3129,6 +3175,9 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
 	} else if (type == DM_TYPE_REQUEST_BASED) {
 		cachep = _rq_tio_cache;
 		pool_size = dm_get_reserved_rq_based_ios();
+		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
+		if (!pools->rq_pool)
+			goto out;
 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
 		/* per_bio_data_size is not used. See __bind_mempools(). */
 		WARN_ON(per_bio_data_size != 0);
@@ -3162,6 +3211,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
 	if (pools->io_pool)
 		mempool_destroy(pools->io_pool);
 
+	if (pools->rq_pool)
+		mempool_destroy(pools->rq_pool);
+
 	if (pools->bs)
 		bioset_free(pools->bs);
 