author	Mike Snitzer <snitzer@redhat.com>	2016-01-31 12:05:42 -0500
committer	Mike Snitzer <snitzer@redhat.com>	2016-02-22 22:34:37 -0500
commit	591ddcfc4bfad28e096787b1159942124d49cd1e (patch)
tree	985975cb6a1bc173243d938ae283df37d18e959c
parent	30187e1d48a258e304af184c45c3140c8509d219 (diff)
dm: allow immutable request-based targets to use blk-mq pdu
This will allow DM multipath to use a portion of the blk-mq pdu space
for target data (e.g. struct dm_mpath_io).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
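[Editor's sketch, not part of the commit: the per-request pdu layout this sets up. blk_mq_rq_to_pdu() and struct dm_rq_target_io are the real names used in the diff below; dm_example_target_pdu() is a hypothetical helper for illustration.]

#include <linux/blk-mq.h>

/*
 * Sketch: blk-mq allocates tag_set->cmd_size bytes of driver pdu
 * alongside each request.  With this change DM sizes that pdu as
 * the tio plus the immutable target's per-io data:
 *
 *   blk_mq_rq_to_pdu(rq) -> +--------------------------+
 *                           | struct dm_rq_target_io   |
 *                 tio + 1 ->+--------------------------+
 *                           | per_io_data_size bytes   |
 *                           +--------------------------+
 */
static void *dm_example_target_pdu(struct request *rq)
{
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/* dm_mq_init_request stores this same address in tio->info.ptr */
	return tio + 1;
}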
-rw-r--r--	drivers/md/dm-ioctl.c	2
-rw-r--r--	drivers/md/dm.c	33
-rw-r--r--	drivers/md/dm.h	2
3 files changed, 28 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4763c4ae30e4..2adf81d81fca 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1304,7 +1304,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 	dm_set_md_type(md, dm_table_get_type(t));
 
 	/* setup md->queue to reflect md's type (may block) */
-	r = dm_setup_md_queue(md);
+	r = dm_setup_md_queue(md, t);
 	if (r) {
 		DMWARN("unable to set up device queue for new table.");
 		goto err_unlock_md_type;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 89aa9618c061..92c2fee413b6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -224,7 +224,8 @@ struct mapped_device {
 
 	/* for blk-mq request-based DM support */
 	struct blk_mq_tag_set *tag_set;
-	bool use_blk_mq;
+	bool use_blk_mq:1;
+	bool init_tio_pdu:1;
 };
 
 #ifdef CONFIG_DM_MQ_DEFAULT
@@ -243,6 +244,7 @@ bool dm_use_blk_mq(struct mapped_device *md)
 {
 	return md->use_blk_mq;
 }
+EXPORT_SYMBOL_GPL(dm_use_blk_mq);
 
 /*
  * For mempools pre-allocation at the table loading time.
@@ -1889,7 +1891,13 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
 	tio->clone = NULL;
 	tio->orig = rq;
 	tio->error = 0;
-	memset(&tio->info, 0, sizeof(tio->info));
+	/*
+	 * Avoid initializing info for blk-mq; it passes
+	 * target-specific data through info.ptr
+	 * (see: dm_mq_init_request)
+	 */
+	if (!md->init_tio_pdu)
+		memset(&tio->info, 0, sizeof(tio->info));
 	if (md->kworker_task)
 		init_kthread_work(&tio->work, map_tio_request);
 }
@@ -2313,6 +2321,7 @@ static struct mapped_device *alloc_dev(int minor)
 		goto bad_io_barrier;
 
 	md->use_blk_mq = use_blk_mq;
+	md->init_tio_pdu = false;
 	md->type = DM_TYPE_NONE;
 	mutex_init(&md->suspend_lock);
 	mutex_init(&md->type_lock);
@@ -2653,6 +2662,11 @@ static int dm_mq_init_request(void *data, struct request *rq,
 	 */
 	tio->md = md;
 
+	if (md->init_tio_pdu) {
+		/* target-specific per-io data is immediately after the tio */
+		tio->info.ptr = tio + 1;
+	}
+
 	return 0;
 }
 
@@ -2704,7 +2718,8 @@ static struct blk_mq_ops dm_mq_ops = {
 	.init_request = dm_mq_init_request,
 };
 
-static int dm_mq_init_request_queue(struct mapped_device *md)
+static int dm_mq_init_request_queue(struct mapped_device *md,
+				    struct dm_target *immutable_tgt)
 {
 	struct request_queue *q;
 	int err;
@@ -2726,6 +2741,11 @@ static int dm_mq_init_request_queue(struct mapped_device *md)
 	md->tag_set->driver_data = md;
 
 	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
+	if (immutable_tgt && immutable_tgt->per_io_data_size) {
+		/* any target-specific per-io data is immediately after the tio */
+		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
+		md->init_tio_pdu = true;
+	}
 
 	err = blk_mq_alloc_tag_set(md->tag_set);
 	if (err)
@@ -2763,7 +2783,7 @@ static unsigned filter_md_type(unsigned type, struct mapped_device *md)
 /*
  * Setup the DM device's queue based on md's type
  */
-int dm_setup_md_queue(struct mapped_device *md)
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
 	int r;
 	unsigned md_type = filter_md_type(dm_get_md_type(md), md);
@@ -2777,7 +2797,7 @@ int dm_setup_md_queue(struct mapped_device *md)
 		}
 		break;
 	case DM_TYPE_MQ_REQUEST_BASED:
-		r = dm_mq_init_request_queue(md);
+		r = dm_mq_init_request_queue(md, dm_table_get_immutable_target(t));
 		if (r) {
 			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
 			return r;
@@ -3505,8 +3525,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 		if (!pool_size)
 			pool_size = dm_get_reserved_rq_based_ios();
 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
-		/* per_io_data_size is not used. */
-		WARN_ON(per_io_data_size != 0);
+		/* per_io_data_size is used for blk-mq pdu at queue allocation */
 		break;
 	default:
 		BUG();
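[Editor's sketch, not part of the commit: how an immutable request-based target such as multipath can consume the new pdu space. union map_info and its ptr member are the real DM types; the accessor itself is illustrative.]

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	/*
	 * Illustrative accessor: dm_mq_init_request has already
	 * pointed info->ptr at the space after the tio, so the
	 * target's map/end_io paths can simply read it back.
	 */
	return info->ptr;
}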
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 4305a513c801..13a758ec0f88 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -86,7 +86,7 @@ void dm_set_md_type(struct mapped_device *md, unsigned type);
 unsigned dm_get_md_type(struct mapped_device *md);
 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
 
-int dm_setup_md_queue(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
 
 /*
  * To check the return value from dm_table_find_target().
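[Editor's sketch, not part of the commit: the target-side half of the contract. A target declares its per-io space by setting ti->per_io_data_size in its constructor (the field renamed in the parent commit), which DM now folds into cmd_size as above. struct example_io and example_ctr() are hypothetical names.]

#include <linux/device-mapper.h>

struct example_io {			/* hypothetical per-io state */
	unsigned long start_jiffies;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/*
	 * Reserve per-io space; for an immutable request-based target
	 * this now comes from the blk-mq pdu rather than a mempool.
	 */
	ti->per_io_data_size = sizeof(struct example_io);
	return 0;
}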