author     Mike Snitzer <snitzer@redhat.com>   2016-02-20 14:02:49 -0500
committer  Mike Snitzer <snitzer@redhat.com>   2016-02-22 22:33:46 -0500
commit     c5248f79f39e5254977a3916b2149c3ccffa2722 (patch)
tree       bd8a3e1663a3596d88461be3a731ac060f65e345
parent     818c5f3bef750eb5998b468f84391e4d656b97ed (diff)
dm: remove support for stacking dm-mq on .request_fn device(s)
Remove all the fiddly code that propped up support for a blk-mq
request-queue stacked on top of .request_fn devices.

Fault-tolerance testing with DM multipath has shown this niche
request-based dm-mq mode to be buggy, and there is no point trying to
preserve it.

This should improve the efficiency of the pure dm-mq code and make code
maintenance less delicate.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
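As background for the diff below: the one distinction the remaining dm.c code
leans on is how a blk-mq queue is told apart from an old-style .request_fn
queue. A queue set up through blk-mq carries a non-NULL mq_ops table, which is
what the surviving `!md->queue->mq_ops` tests in free_rq_clone() check. The
following minimal sketch is illustrative only and is not part of the patch;
the helper name is made up.

/* Illustrative sketch, not from this patch: how "is this queue blk-mq?"
 * is answered in this era of the block layer. */
#include <linux/blkdev.h>

static inline bool example_queue_is_blk_mq(struct request_queue *q)
{
	/* blk-mq queues provide an mq_ops table; .request_fn queues leave it NULL */
	return q->mq_ops != NULL;
}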
-rw-r--r--  drivers/md/dm-mpath.c |  5
-rw-r--r--  drivers/md/dm.c       | 55
2 files changed, 20 insertions(+), 40 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3ddaa11584ef..7259eeba6a58 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -418,7 +418,10 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 	spin_unlock_irq(&m->lock);
 
 	if (clone) {
-		/* Old request-based interface: allocated clone is passed in */
+		/*
+		 * Old request-based interface: allocated clone is passed in.
+		 * Used by: .request_fn stacked on .request_fn path(s).
+		 */
 		clone->q = bdev_get_queue(bdev);
 		clone->rq_disk = bdev->bd_disk;
 		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8a62e43fbff5..26fedd93702e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1141,11 +1141,6 @@ static void free_rq_clone(struct request *clone)
 	else if (!md->queue->mq_ops)
 		/* request_fn queue stacked on request_fn queue(s) */
 		free_clone_request(md, clone);
-	/*
-	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
-	 * no need to call free_clone_request() because we leverage blk-mq by
-	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
-	 */
 
 	if (!md->queue->mq_ops)
 		free_rq_tio(tio);
@@ -1866,24 +1861,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 				struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
 	/*
-	 * Do not allocate a clone if tio->clone was already set
-	 * (see: dm_mq_queue_rq).
+	 * Create clone for use with .request_fn request_queue
 	 */
-	bool alloc_clone = !tio->clone;
 	struct request *clone;
 
-	if (alloc_clone) {
-		clone = alloc_clone_request(md, gfp_mask);
-		if (!clone)
-			return NULL;
-	} else
-		clone = tio->clone;
+	clone = alloc_clone_request(md, gfp_mask);
+	if (!clone)
+		return NULL;
 
 	blk_rq_init(NULL, clone);
 	if (setup_clone(clone, rq, tio, gfp_mask)) {
 		/* -ENOMEM */
-		if (alloc_clone)
-			free_clone_request(md, clone);
+		free_clone_request(md, clone);
 		return NULL;
 	}
 
@@ -2692,22 +2681,12 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 */
 	tio->ti = ti;
 
-	/*
-	 * Both the table and md type cannot change after initial table load
-	 */
-	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
-		/* clone request is allocated at the end of the pdu */
-		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
-		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
-		queue_kthread_work(&md->kworker, &tio->work);
-	} else {
-		/* Direct call is fine since .queue_rq allows allocations */
-		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
-			/* Undo dm_start_request() before requeuing */
-			rq_end_stats(md, rq);
-			rq_completed(md, rq_data_dir(rq), false);
-			return BLK_MQ_RQ_QUEUE_BUSY;
-		}
+	/* Direct call is fine since .queue_rq allows allocations */
+	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+		/* Undo dm_start_request() before requeuing */
+		rq_end_stats(md, rq);
+		rq_completed(md, rq_data_dir(rq), false);
+		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
 
 	return BLK_MQ_RQ_QUEUE_OK;
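A hedged aside on the surviving error path (this skeleton is illustrative, not
DM's code; every name other than the BLK_MQ_* constants, the request types and
blk_mq_start_request() is a placeholder): in this kernel generation a
.queue_rq handler returns BLK_MQ_RQ_QUEUE_OK once the request has been handed
off, and BLK_MQ_RQ_QUEUE_BUSY to ask blk-mq to hold the request and retry it
later, which is why dm_mq_queue_rq() first undoes the accounting done by
dm_start_request().

/* Illustrative .queue_rq skeleton (not DM's code): undo per-request setup
 * before returning BUSY so the later retry starts from a clean state. */
#include <linux/blk-mq.h>

static int example_dispatch(struct request *rq)
{
	return 0;	/* placeholder: pretend the dispatch succeeded */
}

static void example_undo_start(struct request *rq)
{
	/* placeholder: roll back whatever example-specific setup was done */
}

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	if (example_dispatch(rq) < 0) {
		example_undo_start(rq);
		return BLK_MQ_RQ_QUEUE_BUSY;	/* blk-mq will retry this request */
	}

	return BLK_MQ_RQ_QUEUE_OK;
}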
@@ -2726,6 +2705,11 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
 	struct request_queue *q;
 	int err;
 
+	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
+		return -EINVAL;
+	}
+
 	md->tag_set = kzalloc(sizeof(struct blk_mq_tag_set), GFP_KERNEL);
 	if (!md->tag_set)
 		return -ENOMEM;
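Reading of the new guard (not text from the patch): when the table type is the
old DM_TYPE_REQUEST_BASED, blk-mq queue setup now refuses outright instead of
trying to bridge the two models. The sketch below only illustrates the common
device-mapper pattern of logging via DMERR() and failing with -EINVAL; the
function and parameter names are placeholders.

/* Illustrative only: the usual "validate early, DMERR(), -EINVAL" pattern. */
#define DM_MSG_PREFIX "example"
#include <linux/device-mapper.h>

static int example_check_stacking(bool paths_are_blk_mq)
{
	if (!paths_are_blk_mq) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}
	return 0;
}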
@@ -2738,10 +2722,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
 	md->tag_set->driver_data = md;
 
 	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
-	if (md_type == DM_TYPE_REQUEST_BASED) {
-		/* put the memory for non-blk-mq clone at the end of the pdu */
-		md->tag_set->cmd_size += sizeof(struct request);
-	}
 
 	err = blk_mq_alloc_tag_set(md->tag_set);
 	if (err)
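Background for the removed cmd_size bump (an illustrative aside; the structure
and function names below are placeholders, not DM's): a blk-mq driver reserves
per-request private space by setting tag_set->cmd_size, and blk_mq_rq_to_pdu()
later returns a pointer to that space. The deleted lines had been enlarging
the area by sizeof(struct request) so the old .request_fn-style clone could
live at the end of the pdu; with that stacking mode gone, the pdu only needs
to hold the struct dm_rq_target_io.

/* Illustrative only: how per-request pdu space is declared and used. */
#include <linux/blk-mq.h>

struct example_per_rq {
	int status;	/* placeholder driver-private state */
};

/* At tag_set setup time (sketch): tag_set->cmd_size = sizeof(struct example_per_rq); */

static void example_use_pdu(struct request *rq)
{
	struct example_per_rq *p = blk_mq_rq_to_pdu(rq);

	p->status = 0;	/* the pdu sits right behind the request allocation */
}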
@@ -2758,9 +2738,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
 	blk_mq_register_disk(md->disk);
 
-	if (md_type == DM_TYPE_REQUEST_BASED)
-		init_rq_based_worker_thread(md);
-
 	return 0;
 
 out_tag_set: