Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/bcache/io.c       |   2
-rw-r--r--   drivers/md/bcache/request.c  |   2
-rw-r--r--   drivers/md/dm-cache-target.c |   6
-rw-r--r--   drivers/md/dm-raid1.c        |   2
-rw-r--r--   drivers/md/dm-snap.c         |   1
-rw-r--r--   drivers/md/dm-table.c        |  25
-rw-r--r--   drivers/md/dm-thin.c         |   9
-rw-r--r--   drivers/md/dm-verity.c       |   2
-rw-r--r--   drivers/md/dm.c              | 171
-rw-r--r--   drivers/md/dm.h              |   5
10 files changed, 65 insertions(+), 160 deletions(-)
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index fa028fa82df4..cb64e64a4789 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -55,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
 
 	s->bio->bi_end_io = s->bi_end_io;
 	s->bio->bi_private = s->bi_private;
-	bio_endio_nodec(s->bio, 0);
+	bio_endio(s->bio, 0);
 
 	closure_debug_destroy(&s->cl);
 	mempool_free(s, s->p->bio_split_hook);
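
Note: bio_endio_nodec() existed only because code restoring a stashed bi_end_io used to have to pair the restore with an atomic_inc of bio->bi_remaining; once the block core stops counting bi_remaining for non-chained bios, the plain completion call is enough. A minimal before/after sketch of the restore pattern, assuming the two-argument bio_endio() used throughout this diff (saved_end_io and saved_private are hypothetical stand-ins for the stashed fields):

	/* before: restore, then complete without touching bi_remaining */
	bio->bi_end_io = saved_end_io;
	bio->bi_private = saved_private;
	bio_endio_nodec(bio, 0);

	/* after: bio_endio() no longer counts bi_remaining for
	 * non-chained bios, so the special variant can go away */
	bio->bi_end_io = saved_end_io;
	bio->bi_private = saved_private;
	bio_endio(bio, 0);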
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index ab43faddb447..1616f668a4cb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -619,7 +619,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
 	bio->bi_end_io = request_endio;
 	bio->bi_private = &s->cl;
 
-	atomic_set(&bio->bi_cnt, 3);
+	bio_cnt_set(bio, 3);
 }
 
 static void search_free(struct closure *cl)
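
Note: bio_cnt_set() replaces the open-coded atomic_set() on the bio reference count. A sketch of the accessor's likely shape, assuming the refcount field becomes private to the block core (it is renamed __bi_cnt in the same series); see include/linux/bio.h for the authoritative definition:

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	atomic_set(&bio->__bi_cnt, count);	/* bio internals only */
}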
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 7755af351867..41b2594a80c6 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -86,12 +86,6 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
 {
 	bio->bi_end_io = h->bi_end_io;
 	bio->bi_private = h->bi_private;
-
-	/*
-	 * Must bump bi_remaining to allow bio to complete with
-	 * restored bi_end_io.
-	 */
-	atomic_inc(&bio->bi_remaining);
 }
 
 /*----------------------------------------------------------------*/
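
Note: dm_unhook_bio() is the restore half of a hook/unhook pair, and with bi_remaining gone it shrinks to the two assignments. For context, a sketch of the matching save side, shaped like the dm_hook_bio() defined just above this hunk in dm-cache-target.c:

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;	/* stash the original completion */
	h->bi_private = bio->bi_private;

	bio->bi_end_io = bi_end_io;	/* redirect to the interceptor */
	bio->bi_private = bi_private;
}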
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 089d62751f7f..743fa9bbae9e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1254,8 +1254,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 		dm_bio_restore(bd, bio);
 		bio_record->details.bi_bdev = NULL;
 
-		atomic_inc(&bio->bi_remaining);
-
 		queue_bio(ms, bio, rw);
 		return DM_ENDIO_INCOMPLETE;
 	}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f83a0f3fc365..7c82d3ccce87 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1478,7 +1478,6 @@ out:
 	if (full_bio) {
 		full_bio->bi_end_io = pe->full_bio_end_io;
 		full_bio->bi_private = pe->full_bio_private;
-		atomic_inc(&full_bio->bi_remaining);
 	}
 	increment_pending_exceptions_done_count();
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 16ba55ad7089..a5f94125ad01 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -942,21 +942,28 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 {
 	unsigned type = dm_table_get_type(t);
 	unsigned per_bio_data_size = 0;
-	struct dm_target *tgt;
 	unsigned i;
 
-	if (unlikely(type == DM_TYPE_NONE)) {
+	switch (type) {
+	case DM_TYPE_BIO_BASED:
+		for (i = 0; i < t->num_targets; i++) {
+			struct dm_target *tgt = t->targets + i;
+
+			per_bio_data_size = max(per_bio_data_size,
+						tgt->per_bio_data_size);
+		}
+		t->mempools = dm_alloc_bio_mempools(t->integrity_supported,
+						    per_bio_data_size);
+		break;
+	case DM_TYPE_REQUEST_BASED:
+	case DM_TYPE_MQ_REQUEST_BASED:
+		t->mempools = dm_alloc_rq_mempools(md, type);
+		break;
+	default:
 		DMWARN("no table type is set, can't allocate mempools");
 		return -EINVAL;
 	}
 
-	if (type == DM_TYPE_BIO_BASED)
-		for (i = 0; i < t->num_targets; i++) {
-			tgt = t->targets + i;
-			per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
-		}
-
-	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
 	if (!t->mempools)
 		return -ENOMEM;
 
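
Note: the switch replaces both the old DM_TYPE_NONE check and the trailing catch-all dm_alloc_md_mempools() call; bio-based tables size their pools here, while request-based tables get theirs from the new dm_alloc_rq_mempools(). The max() loop matters because one bioset is shared by every target in the table, so its front padding must cover the largest per-bio data request. A small illustration with made-up sizes:

	unsigned per_bio_data_size = 0;

	/* two hypothetical stacked targets requesting 32 and 64 bytes */
	per_bio_data_size = max(per_bio_data_size, 32u);
	per_bio_data_size = max(per_bio_data_size, 64u);
	/* result: 64, so either target can safely use dm_per_bio_data() */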
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 921aafd12aee..e852602c0091 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -793,10 +793,9 @@ static void inc_remap_and_issue_cell(struct thin_c *tc,
 
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
-	if (m->bio) {
+	if (m->bio)
 		m->bio->bi_end_io = m->saved_bi_end_io;
-		atomic_inc(&m->bio->bi_remaining);
-	}
+
 	cell_error(m->tc->pool, m->cell);
 	list_del(&m->list);
 	mempool_free(m, m->tc->pool->mapping_pool);
@@ -810,10 +809,8 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	int r;
 
 	bio = m->bio;
-	if (bio) {
+	if (bio)
 		bio->bi_end_io = m->saved_bi_end_io;
-		atomic_inc(&bio->bi_remaining);
-	}
 
 	if (m->err) {
 		cell_error(pool, m->cell);
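
Note: both hunks are the restore half of dm-thin's overwrite interception; the mapping path stashes the bio's completion in m->saved_bi_end_io and redirects it until the block mapping is committed. A minimal sketch of the save side, assuming the overwrite_endio handler dm-thin uses elsewhere (hook_overwrite_bio is a hypothetical helper name):

static void hook_overwrite_bio(struct dm_thin_new_mapping *m, struct bio *bio)
{
	m->bio = bio;
	m->saved_bi_end_io = bio->bi_end_io;	/* remembered for the restore above */
	bio->bi_end_io = overwrite_endio;	/* intercept completion */
}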
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 66616db33e6f..bb9c6a00e4b0 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -459,7 +459,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
 	bio->bi_end_io = io->orig_bi_end_io;
 	bio->bi_private = io->orig_bi_private;
 
-	bio_endio_nodec(bio, error);
+	bio_endio(bio, error);
 }
 
 static void verity_work(struct work_struct *w)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2caf492890d6..4d6f089a0e9e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -990,57 +990,6 @@ static void clone_endio(struct bio *bio, int error)
 	dec_pending(io, error);
 }
 
-/*
- * Partial completion handling for request-based dm
- */
-static void end_clone_bio(struct bio *clone, int error)
-{
-	struct dm_rq_clone_bio_info *info =
-		container_of(clone, struct dm_rq_clone_bio_info, clone);
-	struct dm_rq_target_io *tio = info->tio;
-	struct bio *bio = info->orig;
-	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
-
-	bio_put(clone);
-
-	if (tio->error)
-		/*
-		 * An error has already been detected on the request.
-		 * Once error occurred, just let clone->end_io() handle
-		 * the remainder.
-		 */
-		return;
-	else if (error) {
-		/*
-		 * Don't notice the error to the upper layer yet.
-		 * The error handling decision is made by the target driver,
-		 * when the request is completed.
-		 */
-		tio->error = error;
-		return;
-	}
-
-	/*
-	 * I/O for the bio successfully completed.
-	 * Notice the data completion to the upper layer.
-	 */
-
-	/*
-	 * bios are processed from the head of the list.
-	 * So the completing bio should always be rq->bio.
-	 * If it's not, something wrong is happening.
-	 */
-	if (tio->orig->bio != bio)
-		DMERR("bio completion is going in the middle of the request");
-
-	/*
-	 * Update the original request.
-	 * Do not use blk_end_request() here, because it may complete
-	 * the original request before the clone, and break the ordering.
-	 */
-	blk_update_request(tio->orig, 0, nr_bytes);
-}
-
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
 {
 	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
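
Note: end_clone_bio() only made sense while a request clone carried its own copies of the original's bios, each tracked back to its source so partial completions could be relayed with blk_update_request(). For reference, the bookkeeping struct this removal retires, as declared earlier in dm.c:

struct dm_rq_clone_bio_info {
	struct bio *orig;		/* matching bio in the original request */
	struct dm_rq_target_io *tio;	/* the clone's target io */
	struct bio clone;		/* the copied bio itself */
};

With the reworked blk_rq_prep_clone() used below, the clone shares the original's bios outright, so there is no per-bio completion left to intercept.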
@@ -1087,8 +1036,6 @@ static void free_rq_clone(struct request *clone)
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 
-	blk_rq_unprep_clone(clone);
-
 	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
 		/* stacked on blk-mq queue(s) */
 		tio->ti->type->release_clone_rq(clone);
@@ -1827,39 +1774,13 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 	dm_complete_request(rq, r);
 }
 
-static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
-				 void *data)
+static void setup_clone(struct request *clone, struct request *rq,
+			struct dm_rq_target_io *tio)
 {
-	struct dm_rq_target_io *tio = data;
-	struct dm_rq_clone_bio_info *info =
-		container_of(bio, struct dm_rq_clone_bio_info, clone);
-
-	info->orig = bio_orig;
-	info->tio = tio;
-	bio->bi_end_io = end_clone_bio;
-
-	return 0;
-}
-
-static int setup_clone(struct request *clone, struct request *rq,
-		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
-	int r;
-
-	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
-			      dm_rq_bio_constructor, tio);
-	if (r)
-		return r;
-
-	clone->cmd = rq->cmd;
-	clone->cmd_len = rq->cmd_len;
-	clone->sense = rq->sense;
+	blk_rq_prep_clone(clone, rq);
 	clone->end_io = end_clone_request;
 	clone->end_io_data = tio;
-
 	tio->clone = clone;
-
-	return 0;
 }
 
 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
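
Note: setup_clone() can no longer fail because the reworked blk_rq_prep_clone() neither allocates bios nor takes a bio_set or gfp_mask; it copies the request fields and points the clone at the original's bio list. A rough sketch of its new shape (hedged; the authoritative version lives in block/blk-core.c in the same series), which is also why the clone->cmd/cmd_len/sense copies disappear from setup_clone() above:

void blk_rq_prep_clone(struct request *rq, struct request *rq_src)
{
	__blk_rq_prep_clone(rq, rq_src);	/* flags, sector, lengths, ... */
	rq->bio = rq_src->bio;			/* share, rather than copy, the bios */
	rq->biotail = rq_src->biotail;
	rq->cmd = rq_src->cmd;
	rq->cmd_len = rq_src->cmd_len;
	rq->sense = rq_src->sense;
}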
@@ -1880,12 +1801,7 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 		clone = tio->clone;
 
 	blk_rq_init(NULL, clone);
-	if (setup_clone(clone, rq, tio, gfp_mask)) {
-		/* -ENOMEM */
-		if (alloc_clone)
-			free_clone_request(md, clone);
-		return NULL;
-	}
+	setup_clone(clone, rq, tio);
 
 	return clone;
 }
@@ -1979,11 +1895,7 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		}
 		if (r != DM_MAPIO_REMAPPED)
 			return r;
-		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
-			/* -ENOMEM */
-			ti->type->release_clone_rq(clone);
-			return DM_MAPIO_REQUEUE;
-		}
+		setup_clone(clone, rq, tio);
 	}
 
 	switch (r) {
@@ -2437,8 +2349,6 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 		goto out;
 	}
 
-	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
-
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
 	md->rq_pool = p->rq_pool;
@@ -3544,48 +3454,23 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
-					    unsigned integrity, unsigned per_bio_data_size)
+struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
+					     unsigned per_bio_data_size)
 {
-	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
-	struct kmem_cache *cachep = NULL;
-	unsigned int pool_size = 0;
+	struct dm_md_mempools *pools;
+	unsigned int pool_size = dm_get_reserved_bio_based_ios();
 	unsigned int front_pad;
 
+	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
 	if (!pools)
 		return NULL;
 
-	type = filter_md_type(type, md);
+	front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) +
+		offsetof(struct dm_target_io, clone);
 
-	switch (type) {
-	case DM_TYPE_BIO_BASED:
-		cachep = _io_cache;
-		pool_size = dm_get_reserved_bio_based_ios();
-		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
-		break;
-	case DM_TYPE_REQUEST_BASED:
-		cachep = _rq_tio_cache;
-		pool_size = dm_get_reserved_rq_based_ios();
-		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-		if (!pools->rq_pool)
-			goto out;
-		/* fall through to setup remaining rq-based pools */
-	case DM_TYPE_MQ_REQUEST_BASED:
-		if (!pool_size)
-			pool_size = dm_get_reserved_rq_based_ios();
-		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
-		/* per_bio_data_size is not used. See __bind_mempools(). */
-		WARN_ON(per_bio_data_size != 0);
-		break;
-	default:
-		BUG();
-	}
-
-	if (cachep) {
-		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
-		if (!pools->io_pool)
-			goto out;
-	}
+	pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
+	if (!pools->io_pool)
+		goto out;
 
 	pools->bs = bioset_create_nobvec(pool_size, front_pad);
 	if (!pools->bs)
@@ -3595,10 +3480,34 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 		goto out;
 
 	return pools;
-
 out:
 	dm_free_md_mempools(pools);
+	return NULL;
+}
+
+struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
+					    unsigned type)
+{
+	unsigned int pool_size = dm_get_reserved_rq_based_ios();
+	struct dm_md_mempools *pools;
+
+	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+	if (!pools)
+		return NULL;
+
+	if (filter_md_type(type, md) == DM_TYPE_REQUEST_BASED) {
+		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
+		if (!pools->rq_pool)
+			goto out;
+	}
 
+	pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache);
+	if (!pools->io_pool)
+		goto out;
+
+	return pools;
+out:
+	dm_free_md_mempools(pools);
 	return NULL;
 }
 
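
Note: the front_pad arithmetic in dm_alloc_bio_mempools() reserves space in front of every bio allocated from pools->bs: the target's per-bio data (rounded up for alignment) sits just below the embedded struct dm_target_io, whose final member is the clone bio itself. That layout is what lets targets recover their data from a bio pointer; as a reminder, the accessor works backwards from the bio, shaped like the include/linux/device-mapper.h helper:

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	/* front pad layout: [per-bio data][dm_target_io head]; bio == &tio->clone */
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}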
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 6123c2bf9150..e6e66d087b26 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -222,8 +222,9 @@ void dm_kcopyd_exit(void);
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
-					    unsigned integrity, unsigned per_bio_data_size);
+struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
+					     unsigned per_bio_data_size);
+struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md, unsigned type);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
 /*