author    Mike Snitzer <snitzer@redhat.com>    2015-06-26 09:42:57 -0400
committer Mike Snitzer <snitzer@redhat.com>    2015-06-26 10:11:07 -0400
commit    4e6e36c3714364b65f2bfea8c73691c663891726
tree      e5fd2763c7d873bf70457e447922060dd8446223
parent    e262f34741522e0d821642e5449c6eeb512723fc
Revert "dm: do not allocate any mempools for blk-mq request-based DM"
This reverts commit cbc4e3c1350beb47beab8f34ad9be3d34a20c705.
Reported-by: Junichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
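
Context for the dm-table.c hunk below: the reverted commit had switched DM's mempool allocators from the kernel's plain NULL-on-failure convention to ERR_PTR()-encoded errors; this revert restores NULL returns and the matching !t->mempools check in the caller. As a minimal sketch (illustration only, not part of the patch: the alloc_pools_* helpers are made-up stand-ins for dm_alloc_bio_mempools()/dm_alloc_rq_mempools(), and the ERR_PTR machinery mirrors include/linux/err.h):

/*
 * Illustration only. Self-contained userspace model of the two
 * error-return styles; compiles and runs with any C compiler.
 * ERR_PTR/IS_ERR/PTR_ERR mirror include/linux/err.h.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095	/* same bound the kernel uses */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errors live in the top 4095 bytes of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Style the reverted commit used: the errno travels inside the pointer. */
static void *alloc_pools_errptr(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)"pools";
}

/* Style this revert restores: NULL simply means allocation failure. */
static void *alloc_pools_null(int fail)
{
	return fail ? NULL : (void *)"pools";
}

int main(void)
{
	void *p = alloc_pools_errptr(1);
	if (IS_ERR(p))			/* caller must use IS_ERR()/PTR_ERR() */
		printf("errptr style: error %ld\n", PTR_ERR(p));

	p = alloc_pools_null(1);
	if (!p)				/* caller only tests for NULL */
		printf("null style: assume -ENOMEM (%d)\n", -ENOMEM);
	return 0;
}

The trade-off visible in the hunk: a NULL return can only mean -ENOMEM, so the caller hardcodes it, whereas ERR_PTR() lets the allocator propagate a specific errno at the cost of every caller needing IS_ERR()/PTR_ERR().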
 drivers/md/dm-table.c |  4 ++--
 drivers/md/dm.c       | 69 ++++++++++++++++------------------
 2 files changed, 33 insertions(+), 40 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 85e1d39e9a38..a5f94125ad01 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -964,8 +964,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 		return -EINVAL;
 	}
 
-	if (IS_ERR(t->mempools))
-		return PTR_ERR(t->mempools);
+	if (!t->mempools)
+		return -ENOMEM;
 
 	return 0;
 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 90dc49e3c78f..492181e16c69 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2349,52 +2349,39 @@ static void free_dev(struct mapped_device *md)
 	kfree(md);
 }
 
-static unsigned filter_md_type(unsigned type, struct mapped_device *md)
-{
-	if (type == DM_TYPE_BIO_BASED)
-		return type;
-
-	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
-}
-
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-	switch (filter_md_type(dm_table_get_type(t), md)) {
-	case DM_TYPE_BIO_BASED:
-		if (md->bs && md->io_pool) {
+	if (md->bs) {
+		/* The md already has necessary mempools. */
+		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
 			/*
-			 * This bio-based md already has necessary mempools.
 			 * Reload bioset because front_pad may have changed
 			 * because a different table was loaded.
 			 */
 			bioset_free(md->bs);
 			md->bs = p->bs;
 			p->bs = NULL;
-			goto out;
 		}
-		break;
-	case DM_TYPE_REQUEST_BASED:
-		if (md->rq_pool && md->io_pool)
-			/*
-			 * This request-based md already has necessary mempools.
-			 */
-			goto out;
-		break;
-	case DM_TYPE_MQ_REQUEST_BASED:
-		BUG_ON(p); /* No mempools needed */
-		return;
+		/*
+		 * There's no need to reload with request-based dm
+		 * because the size of front_pad doesn't change.
+		 * Note for future: If you are to reload bioset,
+		 * prep-ed requests in the queue may refer
+		 * to bio from the old bioset, so you must walk
+		 * through the queue to unprep.
+		 */
+		goto out;
 	}
 
-	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
-
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
 	md->rq_pool = p->rq_pool;
 	p->rq_pool = NULL;
 	md->bs = p->bs;
 	p->bs = NULL;
+
 out:
 	/* mempool bind completed, no longer need any mempools in the table */
 	dm_table_free_md_mempools(t);
@@ -2774,6 +2761,14 @@ out_tag_set:
 	return err;
 }
 
+static unsigned filter_md_type(unsigned type, struct mapped_device *md)
+{
+	if (type == DM_TYPE_BIO_BASED)
+		return type;
+
+	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
+}
+
 /*
  * Setup the DM device's queue based on md's type
  */
@@ -3495,7 +3490,7 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
 
 	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
 	if (!pools)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) +
 		offsetof(struct dm_target_io, clone);
@@ -3514,26 +3509,24 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
 	return pools;
 out:
 	dm_free_md_mempools(pools);
-	return ERR_PTR(-ENOMEM);
+	return NULL;
 }
 
 struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
 					    unsigned type)
 {
-	unsigned int pool_size;
+	unsigned int pool_size = dm_get_reserved_rq_based_ios();
 	struct dm_md_mempools *pools;
 
-	if (filter_md_type(type, md) == DM_TYPE_MQ_REQUEST_BASED)
-		return NULL; /* No mempools needed */
-
-	pool_size = dm_get_reserved_rq_based_ios();
 	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
 	if (!pools)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
-	pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-	if (!pools->rq_pool)
-		goto out;
+	if (filter_md_type(type, md) == DM_TYPE_REQUEST_BASED) {
+		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
+		if (!pools->rq_pool)
+			goto out;
+	}
 
 	pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache);
 	if (!pools->io_pool)
@@ -3542,7 +3535,7 @@ struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
 	return pools;
 out:
 	dm_free_md_mempools(pools);
-	return ERR_PTR(-ENOMEM);
+	return NULL;
 }
 
 void dm_free_md_mempools(struct dm_md_mempools *pools)
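
To summarize the dm.c behavior restored above: filter_md_type() folds the table type and md->use_blk_mq into three cases, and dm_alloc_rq_mempools() once again builds an io_pool for every request-based device, skipping only the rq_pool for blk-mq; the reverted commit had allocated no mempools at all in the blk-mq case. A compile-alone sketch of that decision (illustration only: the DM_TYPE_* values and the one-field mapped_device are stand-ins, not the kernel's definitions):

/* Illustration only; models the restored allocation decision in userspace. */
#include <stdio.h>

enum {
	DM_TYPE_BIO_BASED,		/* stand-in values, not dm.h's */
	DM_TYPE_REQUEST_BASED,
	DM_TYPE_MQ_REQUEST_BASED,
};

struct mapped_device {
	int use_blk_mq;			/* stand-in for the real struct */
};

/* Same logic as the filter_md_type() this revert re-adds to dm.c. */
static unsigned filter_md_type(unsigned type, struct mapped_device *md)
{
	if (type == DM_TYPE_BIO_BASED)
		return type;

	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
}

int main(void)
{
	struct mapped_device legacy = { .use_blk_mq = 0 };
	struct mapped_device blkmq  = { .use_blk_mq = 1 };

	/* Legacy request-based DM: rq_pool and io_pool are both created. */
	printf("legacy request-based: rq_pool=%s io_pool=yes\n",
	       filter_md_type(DM_TYPE_REQUEST_BASED, &legacy) ==
	       DM_TYPE_REQUEST_BASED ? "yes" : "no");

	/*
	 * blk-mq request-based DM: rq_pool is skipped but io_pool is still
	 * built -- the reverted commit had returned no mempools here.
	 */
	printf("blk-mq request-based: rq_pool=%s io_pool=yes\n",
	       filter_md_type(DM_TYPE_REQUEST_BASED, &blkmq) ==
	       DM_TYPE_REQUEST_BASED ? "yes" : "no");
	return 0;
}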