diff options
author | Mike Snitzer <snitzer@redhat.com> | 2014-12-17 21:08:12 -0500 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2015-02-09 13:06:47 -0500 |
commit | e5863d9ad754926e7d3f38b43ac8bd48ef73b097 (patch) | |
tree | 0e4d75672884b7dc93296beea8ddf3833b4d7f38 | |
parent | 466d89a6bcd500f64896b514f78b32e8d0b0303a (diff) |
dm: allocate requests in target when stacking on blk-mq devices
For blk-mq request-based DM the responsibility of allocating a cloned
request is transferred from DM core to the target type. Doing so
enables the cloned request to be allocated from the appropriate
blk-mq request_queue's pool (only the DM target, e.g. multipath, can
know which block device to send a given cloned request to).
Care was taken to preserve compatibility with old-style block request
completion that requires request-based DM _not_ acquire the clone
request's queue lock in the completion path. As such, there are now 2
different request-based DM target_type interfaces:
1) the original .map_rq() interface will continue to be used for
non-blk-mq devices -- the preallocated clone request is passed in
from DM core.
2) a new .clone_and_map_rq() and .release_clone_rq() will be used for
blk-mq devices -- blk_get_request() and blk_put_request() are used
respectively from these hooks.
dm_table_set_type() was updated to detect if the request-based target is
being stacked on blk-mq devices, if so DM_TYPE_MQ_REQUEST_BASED is set.
DM core disallows switching the DM table's type after it is set. This
means that there is no mixing of non-blk-mq and blk-mq devices within
the same request-based DM table.
[This patch was started by Keith and later heavily modified by Mike]
Tested-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r-- | drivers/md/dm-mpath.c | 51 | ||||
-rw-r--r-- | drivers/md/dm-table.c | 34 | ||||
-rw-r--r-- | drivers/md/dm-target.c | 15 | ||||
-rw-r--r-- | drivers/md/dm.c | 114 | ||||
-rw-r--r-- | drivers/md/dm.h | 8 | ||||
-rw-r--r-- | include/linux/device-mapper.h | 7 | ||||
-rw-r--r-- | include/uapi/linux/dm-ioctl.h | 4 |
7 files changed, 185 insertions, 48 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 2552b88f8953..863fc8c1ac06 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include "dm-path-selector.h" | 11 | #include "dm-path-selector.h" |
12 | #include "dm-uevent.h" | 12 | #include "dm-uevent.h" |
13 | 13 | ||
14 | #include <linux/blkdev.h> | ||
14 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
16 | #include <linux/mempool.h> | 17 | #include <linux/mempool.h> |
@@ -378,12 +379,13 @@ static int __must_push_back(struct multipath *m) | |||
378 | /* | 379 | /* |
379 | * Map cloned requests | 380 | * Map cloned requests |
380 | */ | 381 | */ |
381 | static int multipath_map(struct dm_target *ti, struct request *clone, | 382 | static int __multipath_map(struct dm_target *ti, struct request *clone, |
382 | union map_info *map_context) | 383 | union map_info *map_context, |
384 | struct request *rq, struct request **__clone) | ||
383 | { | 385 | { |
384 | struct multipath *m = (struct multipath *) ti->private; | 386 | struct multipath *m = (struct multipath *) ti->private; |
385 | int r = DM_MAPIO_REQUEUE; | 387 | int r = DM_MAPIO_REQUEUE; |
386 | size_t nr_bytes = blk_rq_bytes(clone); | 388 | size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq); |
387 | struct pgpath *pgpath; | 389 | struct pgpath *pgpath; |
388 | struct block_device *bdev; | 390 | struct block_device *bdev; |
389 | struct dm_mpath_io *mpio; | 391 | struct dm_mpath_io *mpio; |
@@ -416,12 +418,25 @@ static int multipath_map(struct dm_target *ti, struct request *clone, | |||
416 | 418 | ||
417 | bdev = pgpath->path.dev->bdev; | 419 | bdev = pgpath->path.dev->bdev; |
418 | 420 | ||
419 | clone->q = bdev_get_queue(bdev); | ||
420 | clone->rq_disk = bdev->bd_disk; | ||
421 | clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; | ||
422 | |||
423 | spin_unlock_irq(&m->lock); | 421 | spin_unlock_irq(&m->lock); |
424 | 422 | ||
423 | if (clone) { | ||
424 | /* Old request-based interface: allocated clone is passed in */ | ||
425 | clone->q = bdev_get_queue(bdev); | ||
426 | clone->rq_disk = bdev->bd_disk; | ||
427 | clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; | ||
428 | } else { | ||
429 | /* blk-mq request-based interface */ | ||
430 | *__clone = blk_get_request(bdev_get_queue(bdev), | ||
431 | rq_data_dir(rq), GFP_KERNEL); | ||
432 | if (IS_ERR(*__clone)) | ||
433 | /* ENOMEM, requeue */ | ||
434 | return r; | ||
435 | (*__clone)->bio = (*__clone)->biotail = NULL; | ||
436 | (*__clone)->rq_disk = bdev->bd_disk; | ||
437 | (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT; | ||
438 | } | ||
439 | |||
425 | if (pgpath->pg->ps.type->start_io) | 440 | if (pgpath->pg->ps.type->start_io) |
426 | pgpath->pg->ps.type->start_io(&pgpath->pg->ps, | 441 | pgpath->pg->ps.type->start_io(&pgpath->pg->ps, |
427 | &pgpath->path, | 442 | &pgpath->path, |
@@ -434,6 +449,24 @@ out_unlock: | |||
434 | return r; | 449 | return r; |
435 | } | 450 | } |
436 | 451 | ||
452 | static int multipath_map(struct dm_target *ti, struct request *clone, | ||
453 | union map_info *map_context) | ||
454 | { | ||
455 | return __multipath_map(ti, clone, map_context, NULL, NULL); | ||
456 | } | ||
457 | |||
458 | static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, | ||
459 | union map_info *map_context, | ||
460 | struct request **clone) | ||
461 | { | ||
462 | return __multipath_map(ti, NULL, map_context, rq, clone); | ||
463 | } | ||
464 | |||
465 | static void multipath_release_clone(struct request *clone) | ||
466 | { | ||
467 | blk_put_request(clone); | ||
468 | } | ||
469 | |||
437 | /* | 470 | /* |
438 | * If we run out of usable paths, should we queue I/O or error it? | 471 | * If we run out of usable paths, should we queue I/O or error it? |
439 | */ | 472 | */ |
@@ -1670,11 +1703,13 @@ out: | |||
1670 | *---------------------------------------------------------------*/ | 1703 | *---------------------------------------------------------------*/ |
1671 | static struct target_type multipath_target = { | 1704 | static struct target_type multipath_target = { |
1672 | .name = "multipath", | 1705 | .name = "multipath", |
1673 | .version = {1, 7, 0}, | 1706 | .version = {1, 8, 0}, |
1674 | .module = THIS_MODULE, | 1707 | .module = THIS_MODULE, |
1675 | .ctr = multipath_ctr, | 1708 | .ctr = multipath_ctr, |
1676 | .dtr = multipath_dtr, | 1709 | .dtr = multipath_dtr, |
1677 | .map_rq = multipath_map, | 1710 | .map_rq = multipath_map, |
1711 | .clone_and_map_rq = multipath_clone_and_map, | ||
1712 | .release_clone_rq = multipath_release_clone, | ||
1678 | .rq_end_io = multipath_end_io, | 1713 | .rq_end_io = multipath_end_io, |
1679 | .presuspend = multipath_presuspend, | 1714 | .presuspend = multipath_presuspend, |
1680 | .postsuspend = multipath_postsuspend, | 1715 | .postsuspend = multipath_postsuspend, |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 3afae9e062f8..2d7e373955f3 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -827,6 +827,7 @@ static int dm_table_set_type(struct dm_table *t) | |||
827 | { | 827 | { |
828 | unsigned i; | 828 | unsigned i; |
829 | unsigned bio_based = 0, request_based = 0, hybrid = 0; | 829 | unsigned bio_based = 0, request_based = 0, hybrid = 0; |
830 | bool use_blk_mq = false; | ||
830 | struct dm_target *tgt; | 831 | struct dm_target *tgt; |
831 | struct dm_dev_internal *dd; | 832 | struct dm_dev_internal *dd; |
832 | struct list_head *devices; | 833 | struct list_head *devices; |
@@ -872,11 +873,26 @@ static int dm_table_set_type(struct dm_table *t) | |||
872 | /* Non-request-stackable devices can't be used for request-based dm */ | 873 | /* Non-request-stackable devices can't be used for request-based dm */ |
873 | devices = dm_table_get_devices(t); | 874 | devices = dm_table_get_devices(t); |
874 | list_for_each_entry(dd, devices, list) { | 875 | list_for_each_entry(dd, devices, list) { |
875 | if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev->bdev))) { | 876 | struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); |
876 | DMWARN("table load rejected: including" | 877 | |
877 | " non-request-stackable devices"); | 878 | if (!blk_queue_stackable(q)) { |
879 | DMERR("table load rejected: including" | ||
880 | " non-request-stackable devices"); | ||
878 | return -EINVAL; | 881 | return -EINVAL; |
879 | } | 882 | } |
883 | |||
884 | if (q->mq_ops) | ||
885 | use_blk_mq = true; | ||
886 | } | ||
887 | |||
888 | if (use_blk_mq) { | ||
889 | /* verify _all_ devices in the table are blk-mq devices */ | ||
890 | list_for_each_entry(dd, devices, list) | ||
891 | if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) { | ||
892 | DMERR("table load rejected: not all devices" | ||
893 | " are blk-mq request-stackable"); | ||
894 | return -EINVAL; | ||
895 | } | ||
880 | } | 896 | } |
881 | 897 | ||
882 | /* | 898 | /* |
@@ -890,7 +906,7 @@ static int dm_table_set_type(struct dm_table *t) | |||
890 | return -EINVAL; | 906 | return -EINVAL; |
891 | } | 907 | } |
892 | 908 | ||
893 | t->type = DM_TYPE_REQUEST_BASED; | 909 | t->type = !use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED; |
894 | 910 | ||
895 | return 0; | 911 | return 0; |
896 | } | 912 | } |
@@ -907,7 +923,15 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) | |||
907 | 923 | ||
908 | bool dm_table_request_based(struct dm_table *t) | 924 | bool dm_table_request_based(struct dm_table *t) |
909 | { | 925 | { |
910 | return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; | 926 | unsigned table_type = dm_table_get_type(t); |
927 | |||
928 | return (table_type == DM_TYPE_REQUEST_BASED || | ||
929 | table_type == DM_TYPE_MQ_REQUEST_BASED); | ||
930 | } | ||
931 | |||
932 | bool dm_table_mq_request_based(struct dm_table *t) | ||
933 | { | ||
934 | return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED; | ||
911 | } | 935 | } |
912 | 936 | ||
913 | static int dm_table_alloc_md_mempools(struct dm_table *t) | 937 | static int dm_table_alloc_md_mempools(struct dm_table *t) |
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 242e3cec397a..925ec1b15e75 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c | |||
@@ -137,13 +137,26 @@ static int io_err_map_rq(struct dm_target *ti, struct request *clone, | |||
137 | return -EIO; | 137 | return -EIO; |
138 | } | 138 | } |
139 | 139 | ||
140 | static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq, | ||
141 | union map_info *map_context, | ||
142 | struct request **clone) | ||
143 | { | ||
144 | return -EIO; | ||
145 | } | ||
146 | |||
147 | static void io_err_release_clone_rq(struct request *clone) | ||
148 | { | ||
149 | } | ||
150 | |||
140 | static struct target_type error_target = { | 151 | static struct target_type error_target = { |
141 | .name = "error", | 152 | .name = "error", |
142 | .version = {1, 2, 0}, | 153 | .version = {1, 3, 0}, |
143 | .ctr = io_err_ctr, | 154 | .ctr = io_err_ctr, |
144 | .dtr = io_err_dtr, | 155 | .dtr = io_err_dtr, |
145 | .map = io_err_map, | 156 | .map = io_err_map, |
146 | .map_rq = io_err_map_rq, | 157 | .map_rq = io_err_map_rq, |
158 | .clone_and_map_rq = io_err_clone_and_map_rq, | ||
159 | .release_clone_rq = io_err_release_clone_rq, | ||
147 | }; | 160 | }; |
148 | 161 | ||
149 | int __init dm_target_init(void) | 162 | int __init dm_target_init(void) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ae1219893948..549b815999a1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1044,7 +1044,10 @@ static void free_rq_clone(struct request *clone) | |||
1044 | struct dm_rq_target_io *tio = clone->end_io_data; | 1044 | struct dm_rq_target_io *tio = clone->end_io_data; |
1045 | 1045 | ||
1046 | blk_rq_unprep_clone(clone); | 1046 | blk_rq_unprep_clone(clone); |
1047 | free_clone_request(tio->md, clone); | 1047 | if (clone->q && clone->q->mq_ops) |
1048 | tio->ti->type->release_clone_rq(clone); | ||
1049 | else | ||
1050 | free_clone_request(tio->md, clone); | ||
1048 | free_rq_tio(tio); | 1051 | free_rq_tio(tio); |
1049 | } | 1052 | } |
1050 | 1053 | ||
@@ -1086,7 +1089,8 @@ static void dm_unprep_request(struct request *rq) | |||
1086 | rq->special = NULL; | 1089 | rq->special = NULL; |
1087 | rq->cmd_flags &= ~REQ_DONTPREP; | 1090 | rq->cmd_flags &= ~REQ_DONTPREP; |
1088 | 1091 | ||
1089 | free_rq_clone(clone); | 1092 | if (clone) |
1093 | free_rq_clone(clone); | ||
1090 | } | 1094 | } |
1091 | 1095 | ||
1092 | /* | 1096 | /* |
@@ -1185,6 +1189,13 @@ static void dm_softirq_done(struct request *rq) | |||
1185 | struct dm_rq_target_io *tio = rq->special; | 1189 | struct dm_rq_target_io *tio = rq->special; |
1186 | struct request *clone = tio->clone; | 1190 | struct request *clone = tio->clone; |
1187 | 1191 | ||
1192 | if (!clone) { | ||
1193 | blk_end_request_all(rq, tio->error); | ||
1194 | rq_completed(tio->md, rq_data_dir(rq), false); | ||
1195 | free_rq_tio(tio); | ||
1196 | return; | ||
1197 | } | ||
1198 | |||
1188 | if (rq->cmd_flags & REQ_FAILED) | 1199 | if (rq->cmd_flags & REQ_FAILED) |
1189 | mapped = false; | 1200 | mapped = false; |
1190 | 1201 | ||
@@ -1207,7 +1218,7 @@ static void dm_complete_request(struct request *rq, int error) | |||
1207 | * Complete the not-mapped clone and the original request with the error status | 1218 | * Complete the not-mapped clone and the original request with the error status |
1208 | * through softirq context. | 1219 | * through softirq context. |
1209 | * Target's rq_end_io() function isn't called. | 1220 | * Target's rq_end_io() function isn't called. |
1210 | * This may be used when the target's map_rq() function fails. | 1221 | * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. |
1211 | */ | 1222 | */ |
1212 | static void dm_kill_unmapped_request(struct request *rq, int error) | 1223 | static void dm_kill_unmapped_request(struct request *rq, int error) |
1213 | { | 1224 | { |
@@ -1222,13 +1233,15 @@ static void end_clone_request(struct request *clone, int error) | |||
1222 | { | 1233 | { |
1223 | struct dm_rq_target_io *tio = clone->end_io_data; | 1234 | struct dm_rq_target_io *tio = clone->end_io_data; |
1224 | 1235 | ||
1225 | /* | 1236 | if (!clone->q->mq_ops) { |
1226 | * For just cleaning up the information of the queue in which | 1237 | /* |
1227 | * the clone was dispatched. | 1238 | * For just cleaning up the information of the queue in which |
1228 | * The clone is *NOT* freed actually here because it is alloced from | 1239 | * the clone was dispatched. |
1229 | * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags. | 1240 | * The clone is *NOT* freed actually here because it is alloced |
1230 | */ | 1241 | * from dm own mempool (REQ_ALLOCED isn't set). |
1231 | __blk_put_request(clone->q, clone); | 1242 | */ |
1243 | __blk_put_request(clone->q, clone); | ||
1244 | } | ||
1232 | 1245 | ||
1233 | /* | 1246 | /* |
1234 | * Actual request completion is done in a softirq context which doesn't | 1247 | * Actual request completion is done in a softirq context which doesn't |
@@ -1789,6 +1802,8 @@ static struct dm_rq_target_io *prep_tio(struct request *rq, | |||
1789 | struct mapped_device *md, gfp_t gfp_mask) | 1802 | struct mapped_device *md, gfp_t gfp_mask) |
1790 | { | 1803 | { |
1791 | struct dm_rq_target_io *tio; | 1804 | struct dm_rq_target_io *tio; |
1805 | int srcu_idx; | ||
1806 | struct dm_table *table; | ||
1792 | 1807 | ||
1793 | tio = alloc_rq_tio(md, gfp_mask); | 1808 | tio = alloc_rq_tio(md, gfp_mask); |
1794 | if (!tio) | 1809 | if (!tio) |
@@ -1802,10 +1817,15 @@ static struct dm_rq_target_io *prep_tio(struct request *rq, | |||
1802 | memset(&tio->info, 0, sizeof(tio->info)); | 1817 | memset(&tio->info, 0, sizeof(tio->info)); |
1803 | init_kthread_work(&tio->work, map_tio_request); | 1818 | init_kthread_work(&tio->work, map_tio_request); |
1804 | 1819 | ||
1805 | if (!clone_rq(rq, md, tio, gfp_mask)) { | 1820 | table = dm_get_live_table(md, &srcu_idx); |
1806 | free_rq_tio(tio); | 1821 | if (!dm_table_mq_request_based(table)) { |
1807 | return NULL; | 1822 | if (!clone_rq(rq, md, tio, gfp_mask)) { |
1823 | dm_put_live_table(md, srcu_idx); | ||
1824 | free_rq_tio(tio); | ||
1825 | return NULL; | ||
1826 | } | ||
1808 | } | 1827 | } |
1828 | dm_put_live_table(md, srcu_idx); | ||
1809 | 1829 | ||
1810 | return tio; | 1830 | return tio; |
1811 | } | 1831 | } |
@@ -1835,17 +1855,36 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq) | |||
1835 | 1855 | ||
1836 | /* | 1856 | /* |
1837 | * Returns: | 1857 | * Returns: |
1838 | * 0 : the request has been processed (not requeued) | 1858 | * 0 : the request has been processed |
1839 | * !0 : the request has been requeued | 1859 | * DM_MAPIO_REQUEUE : the original request needs to be requeued |
1860 | * < 0 : the request was completed due to failure | ||
1840 | */ | 1861 | */ |
1841 | static int map_request(struct dm_target *ti, struct request *rq, | 1862 | static int map_request(struct dm_target *ti, struct request *rq, |
1842 | struct mapped_device *md) | 1863 | struct mapped_device *md) |
1843 | { | 1864 | { |
1844 | int r, requeued = 0; | 1865 | int r; |
1845 | struct dm_rq_target_io *tio = rq->special; | 1866 | struct dm_rq_target_io *tio = rq->special; |
1846 | struct request *clone = tio->clone; | 1867 | struct request *clone = NULL; |
1868 | |||
1869 | if (tio->clone) { | ||
1870 | clone = tio->clone; | ||
1871 | r = ti->type->map_rq(ti, clone, &tio->info); | ||
1872 | } else { | ||
1873 | r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); | ||
1874 | if (r < 0) { | ||
1875 | /* The target wants to complete the I/O */ | ||
1876 | dm_kill_unmapped_request(rq, r); | ||
1877 | return r; | ||
1878 | } | ||
1879 | if (IS_ERR(clone)) | ||
1880 | return DM_MAPIO_REQUEUE; | ||
1881 | if (setup_clone(clone, rq, tio, GFP_KERNEL)) { | ||
1882 | /* -ENOMEM */ | ||
1883 | ti->type->release_clone_rq(clone); | ||
1884 | return DM_MAPIO_REQUEUE; | ||
1885 | } | ||
1886 | } | ||
1847 | 1887 | ||
1848 | r = ti->type->map_rq(ti, clone, &tio->info); | ||
1849 | switch (r) { | 1888 | switch (r) { |
1850 | case DM_MAPIO_SUBMITTED: | 1889 | case DM_MAPIO_SUBMITTED: |
1851 | /* The target has taken the I/O to submit by itself later */ | 1890 | /* The target has taken the I/O to submit by itself later */ |
@@ -1859,7 +1898,6 @@ static int map_request(struct dm_target *ti, struct request *rq, | |||
1859 | case DM_MAPIO_REQUEUE: | 1898 | case DM_MAPIO_REQUEUE: |
1860 | /* The target wants to requeue the I/O */ | 1899 | /* The target wants to requeue the I/O */ |
1861 | dm_requeue_unmapped_request(clone); | 1900 | dm_requeue_unmapped_request(clone); |
1862 | requeued = 1; | ||
1863 | break; | 1901 | break; |
1864 | default: | 1902 | default: |
1865 | if (r > 0) { | 1903 | if (r > 0) { |
@@ -1869,17 +1907,20 @@ static int map_request(struct dm_target *ti, struct request *rq, | |||
1869 | 1907 | ||
1870 | /* The target wants to complete the I/O */ | 1908 | /* The target wants to complete the I/O */ |
1871 | dm_kill_unmapped_request(rq, r); | 1909 | dm_kill_unmapped_request(rq, r); |
1872 | break; | 1910 | return r; |
1873 | } | 1911 | } |
1874 | 1912 | ||
1875 | return requeued; | 1913 | return 0; |
1876 | } | 1914 | } |
1877 | 1915 | ||
1878 | static void map_tio_request(struct kthread_work *work) | 1916 | static void map_tio_request(struct kthread_work *work) |
1879 | { | 1917 | { |
1880 | struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); | 1918 | struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); |
1919 | struct request *rq = tio->orig; | ||
1920 | struct mapped_device *md = tio->md; | ||
1881 | 1921 | ||
1882 | map_request(tio->ti, tio->orig, tio->md); | 1922 | if (map_request(tio->ti, rq, md) == DM_MAPIO_REQUEUE) |
1923 | dm_requeue_unmapped_original_request(md, rq); | ||
1883 | } | 1924 | } |
1884 | 1925 | ||
1885 | static void dm_start_request(struct mapped_device *md, struct request *orig) | 1926 | static void dm_start_request(struct mapped_device *md, struct request *orig) |
@@ -2459,6 +2500,14 @@ unsigned dm_get_md_type(struct mapped_device *md) | |||
2459 | return md->type; | 2500 | return md->type; |
2460 | } | 2501 | } |
2461 | 2502 | ||
2503 | static bool dm_md_type_request_based(struct mapped_device *md) | ||
2504 | { | ||
2505 | unsigned table_type = dm_get_md_type(md); | ||
2506 | |||
2507 | return (table_type == DM_TYPE_REQUEST_BASED || | ||
2508 | table_type == DM_TYPE_MQ_REQUEST_BASED); | ||
2509 | } | ||
2510 | |||
2462 | struct target_type *dm_get_immutable_target_type(struct mapped_device *md) | 2511 | struct target_type *dm_get_immutable_target_type(struct mapped_device *md) |
2463 | { | 2512 | { |
2464 | return md->immutable_target_type; | 2513 | return md->immutable_target_type; |
@@ -2511,8 +2560,7 @@ static int dm_init_request_based_queue(struct mapped_device *md) | |||
2511 | */ | 2560 | */ |
2512 | int dm_setup_md_queue(struct mapped_device *md) | 2561 | int dm_setup_md_queue(struct mapped_device *md) |
2513 | { | 2562 | { |
2514 | if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) && | 2563 | if (dm_md_type_request_based(md) && !dm_init_request_based_queue(md)) { |
2515 | !dm_init_request_based_queue(md)) { | ||
2516 | DMWARN("Cannot initialize queue for request-based mapped device"); | 2564 | DMWARN("Cannot initialize queue for request-based mapped device"); |
2517 | return -EINVAL; | 2565 | return -EINVAL; |
2518 | } | 2566 | } |
@@ -3184,27 +3232,35 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u | |||
3184 | { | 3232 | { |
3185 | struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); | 3233 | struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); |
3186 | struct kmem_cache *cachep; | 3234 | struct kmem_cache *cachep; |
3187 | unsigned int pool_size; | 3235 | unsigned int pool_size = 0; |
3188 | unsigned int front_pad; | 3236 | unsigned int front_pad; |
3189 | 3237 | ||
3190 | if (!pools) | 3238 | if (!pools) |
3191 | return NULL; | 3239 | return NULL; |
3192 | 3240 | ||
3193 | if (type == DM_TYPE_BIO_BASED) { | 3241 | switch (type) { |
3242 | case DM_TYPE_BIO_BASED: | ||
3194 | cachep = _io_cache; | 3243 | cachep = _io_cache; |
3195 | pool_size = dm_get_reserved_bio_based_ios(); | 3244 | pool_size = dm_get_reserved_bio_based_ios(); |
3196 | front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); | 3245 | front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); |
3197 | } else if (type == DM_TYPE_REQUEST_BASED) { | 3246 | break; |
3198 | cachep = _rq_tio_cache; | 3247 | case DM_TYPE_REQUEST_BASED: |
3199 | pool_size = dm_get_reserved_rq_based_ios(); | 3248 | pool_size = dm_get_reserved_rq_based_ios(); |
3200 | pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); | 3249 | pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); |
3201 | if (!pools->rq_pool) | 3250 | if (!pools->rq_pool) |
3202 | goto out; | 3251 | goto out; |
3252 | /* fall through to setup remaining rq-based pools */ | ||
3253 | case DM_TYPE_MQ_REQUEST_BASED: | ||
3254 | cachep = _rq_tio_cache; | ||
3255 | if (!pool_size) | ||
3256 | pool_size = dm_get_reserved_rq_based_ios(); | ||
3203 | front_pad = offsetof(struct dm_rq_clone_bio_info, clone); | 3257 | front_pad = offsetof(struct dm_rq_clone_bio_info, clone); |
3204 | /* per_bio_data_size is not used. See __bind_mempools(). */ | 3258 | /* per_bio_data_size is not used. See __bind_mempools(). */ |
3205 | WARN_ON(per_bio_data_size != 0); | 3259 | WARN_ON(per_bio_data_size != 0); |
3206 | } else | 3260 | break; |
3261 | default: | ||
3207 | goto out; | 3262 | goto out; |
3263 | } | ||
3208 | 3264 | ||
3209 | pools->io_pool = mempool_create_slab_pool(pool_size, cachep); | 3265 | pools->io_pool = mempool_create_slab_pool(pool_size, cachep); |
3210 | if (!pools->io_pool) | 3266 | if (!pools->io_pool) |
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 84b0f9e4ba6c..84d79784b866 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -34,9 +34,10 @@ | |||
34 | /* | 34 | /* |
35 | * Type of table and mapped_device's mempool | 35 | * Type of table and mapped_device's mempool |
36 | */ | 36 | */ |
37 | #define DM_TYPE_NONE 0 | 37 | #define DM_TYPE_NONE 0 |
38 | #define DM_TYPE_BIO_BASED 1 | 38 | #define DM_TYPE_BIO_BASED 1 |
39 | #define DM_TYPE_REQUEST_BASED 2 | 39 | #define DM_TYPE_REQUEST_BASED 2 |
40 | #define DM_TYPE_MQ_REQUEST_BASED 3 | ||
40 | 41 | ||
41 | /* | 42 | /* |
42 | * List of devices that a metadevice uses and should open/close. | 43 | * List of devices that a metadevice uses and should open/close. |
@@ -73,6 +74,7 @@ int dm_table_any_busy_target(struct dm_table *t); | |||
73 | unsigned dm_table_get_type(struct dm_table *t); | 74 | unsigned dm_table_get_type(struct dm_table *t); |
74 | struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); | 75 | struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); |
75 | bool dm_table_request_based(struct dm_table *t); | 76 | bool dm_table_request_based(struct dm_table *t); |
77 | bool dm_table_mq_request_based(struct dm_table *t); | ||
76 | void dm_table_free_md_mempools(struct dm_table *t); | 78 | void dm_table_free_md_mempools(struct dm_table *t); |
77 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); | 79 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); |
78 | 80 | ||
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 19296fba58e8..2646aed1d3fe 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -48,6 +48,11 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti); | |||
48 | typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); | 48 | typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); |
49 | typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, | 49 | typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, |
50 | union map_info *map_context); | 50 | union map_info *map_context); |
51 | typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti, | ||
52 | struct request *rq, | ||
53 | union map_info *map_context, | ||
54 | struct request **clone); | ||
55 | typedef void (*dm_release_clone_request_fn) (struct request *clone); | ||
51 | 56 | ||
52 | /* | 57 | /* |
53 | * Returns: | 58 | * Returns: |
@@ -143,6 +148,8 @@ struct target_type { | |||
143 | dm_dtr_fn dtr; | 148 | dm_dtr_fn dtr; |
144 | dm_map_fn map; | 149 | dm_map_fn map; |
145 | dm_map_request_fn map_rq; | 150 | dm_map_request_fn map_rq; |
151 | dm_clone_and_map_request_fn clone_and_map_rq; | ||
152 | dm_release_clone_request_fn release_clone_rq; | ||
146 | dm_endio_fn end_io; | 153 | dm_endio_fn end_io; |
147 | dm_request_endio_fn rq_end_io; | 154 | dm_request_endio_fn rq_end_io; |
148 | dm_presuspend_fn presuspend; | 155 | dm_presuspend_fn presuspend; |
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index a570d7b5796c..889f3a5b7b18 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h | |||
@@ -267,9 +267,9 @@ enum { | |||
267 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) | 267 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) |
268 | 268 | ||
269 | #define DM_VERSION_MAJOR 4 | 269 | #define DM_VERSION_MAJOR 4 |
270 | #define DM_VERSION_MINOR 29 | 270 | #define DM_VERSION_MINOR 30 |
271 | #define DM_VERSION_PATCHLEVEL 0 | 271 | #define DM_VERSION_PATCHLEVEL 0 |
272 | #define DM_VERSION_EXTRA "-ioctl (2014-10-28)" | 272 | #define DM_VERSION_EXTRA "-ioctl (2014-12-22)" |
273 | 273 | ||
274 | /* Status bits */ | 274 | /* Status bits */ |
275 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ | 275 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ |