aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/dm-mpath.c
diff options
context:
space:
mode:
authorMike Snitzer <snitzer@redhat.com>2014-12-17 21:08:12 -0500
committerMike Snitzer <snitzer@redhat.com>2015-02-09 13:06:47 -0500
commite5863d9ad754926e7d3f38b43ac8bd48ef73b097 (patch)
tree0e4d75672884b7dc93296beea8ddf3833b4d7f38 /drivers/md/dm-mpath.c
parent466d89a6bcd500f64896b514f78b32e8d0b0303a (diff)
dm: allocate requests in target when stacking on blk-mq devices
For blk-mq request-based DM the responsibility of allocating a cloned request is transferred from DM core to the target type. Doing so enables the cloned request to be allocated from the appropriate blk-mq request_queue's pool (only the DM target, e.g. multipath, can know which block device to send a given cloned request to). Care was taken to preserve compatibility with old-style block request completion that requires request-based DM _not_ acquire the clone request's queue lock in the completion path. As such, there are now 2 different request-based DM target_type interfaces: 1) the original .map_rq() interface will continue to be used for non-blk-mq devices -- the preallocated clone request is passed in from DM core. 2) a new .clone_and_map_rq() and .release_clone_rq() will be used for blk-mq devices -- blk_get_request() and blk_put_request() are used respectively from these hooks. dm_table_set_type() was updated to detect if the request-based target is being stacked on blk-mq devices, if so DM_TYPE_MQ_REQUEST_BASED is set. DM core disallows switching the DM table's type after it is set. This means that there is no mixing of non-blk-mq and blk-mq devices within the same request-based DM table. [This patch was started by Keith and later heavily modified by Mike] Tested-by: Bart Van Assche <bvanassche@acm.org> Signed-off-by: Keith Busch <keith.busch@intel.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r--drivers/md/dm-mpath.c51
1 file changed, 43 insertions, 8 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 2552b88f8953..863fc8c1ac06 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -11,6 +11,7 @@
11#include "dm-path-selector.h" 11#include "dm-path-selector.h"
12#include "dm-uevent.h" 12#include "dm-uevent.h"
13 13
14#include <linux/blkdev.h>
14#include <linux/ctype.h> 15#include <linux/ctype.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/mempool.h> 17#include <linux/mempool.h>
@@ -378,12 +379,13 @@ static int __must_push_back(struct multipath *m)
378/* 379/*
379 * Map cloned requests 380 * Map cloned requests
380 */ 381 */
381static int multipath_map(struct dm_target *ti, struct request *clone, 382static int __multipath_map(struct dm_target *ti, struct request *clone,
382 union map_info *map_context) 383 union map_info *map_context,
384 struct request *rq, struct request **__clone)
383{ 385{
384 struct multipath *m = (struct multipath *) ti->private; 386 struct multipath *m = (struct multipath *) ti->private;
385 int r = DM_MAPIO_REQUEUE; 387 int r = DM_MAPIO_REQUEUE;
386 size_t nr_bytes = blk_rq_bytes(clone); 388 size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
387 struct pgpath *pgpath; 389 struct pgpath *pgpath;
388 struct block_device *bdev; 390 struct block_device *bdev;
389 struct dm_mpath_io *mpio; 391 struct dm_mpath_io *mpio;
@@ -416,12 +418,25 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
416 418
417 bdev = pgpath->path.dev->bdev; 419 bdev = pgpath->path.dev->bdev;
418 420
419 clone->q = bdev_get_queue(bdev);
420 clone->rq_disk = bdev->bd_disk;
421 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
422
423 spin_unlock_irq(&m->lock); 421 spin_unlock_irq(&m->lock);
424 422
423 if (clone) {
424 /* Old request-based interface: allocated clone is passed in */
425 clone->q = bdev_get_queue(bdev);
426 clone->rq_disk = bdev->bd_disk;
427 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
428 } else {
429 /* blk-mq request-based interface */
430 *__clone = blk_get_request(bdev_get_queue(bdev),
431 rq_data_dir(rq), GFP_KERNEL);
432 if (IS_ERR(*__clone))
433 /* ENOMEM, requeue */
434 return r;
435 (*__clone)->bio = (*__clone)->biotail = NULL;
436 (*__clone)->rq_disk = bdev->bd_disk;
437 (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
438 }
439
425 if (pgpath->pg->ps.type->start_io) 440 if (pgpath->pg->ps.type->start_io)
426 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, 441 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
427 &pgpath->path, 442 &pgpath->path,
@@ -434,6 +449,24 @@ out_unlock:
434 return r; 449 return r;
435} 450}
436 451
452static int multipath_map(struct dm_target *ti, struct request *clone,
453 union map_info *map_context)
454{
455 return __multipath_map(ti, clone, map_context, NULL, NULL);
456}
457
458static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
459 union map_info *map_context,
460 struct request **clone)
461{
462 return __multipath_map(ti, NULL, map_context, rq, clone);
463}
464
465static void multipath_release_clone(struct request *clone)
466{
467 blk_put_request(clone);
468}
469
437/* 470/*
438 * If we run out of usable paths, should we queue I/O or error it? 471 * If we run out of usable paths, should we queue I/O or error it?
439 */ 472 */
@@ -1670,11 +1703,13 @@ out:
1670 *---------------------------------------------------------------*/ 1703 *---------------------------------------------------------------*/
1671static struct target_type multipath_target = { 1704static struct target_type multipath_target = {
1672 .name = "multipath", 1705 .name = "multipath",
1673 .version = {1, 7, 0}, 1706 .version = {1, 8, 0},
1674 .module = THIS_MODULE, 1707 .module = THIS_MODULE,
1675 .ctr = multipath_ctr, 1708 .ctr = multipath_ctr,
1676 .dtr = multipath_dtr, 1709 .dtr = multipath_dtr,
1677 .map_rq = multipath_map, 1710 .map_rq = multipath_map,
1711 .clone_and_map_rq = multipath_clone_and_map,
1712 .release_clone_rq = multipath_release_clone,
1678 .rq_end_io = multipath_end_io, 1713 .rq_end_io = multipath_end_io,
1679 .presuspend = multipath_presuspend, 1714 .presuspend = multipath_presuspend,
1680 .postsuspend = multipath_postsuspend, 1715 .postsuspend = multipath_postsuspend,