author    Mike Snitzer <snitzer@redhat.com>    2016-05-24 21:16:51 -0400
committer Mike Snitzer <snitzer@redhat.com>    2016-06-10 15:16:02 -0400
commit    e83068a5faafb8ca65d3b58bd1e1e3959ce1ddce
tree      9158ec7acad94d7035153f84e8ff53205caf7315 /drivers/md/dm-mpath.c
parent    bf661be1fcf9b1da8abc81a56ff41ce5964ce896
dm mpath: add optional "queue_mode" feature
Allow a user to specify an optional feature 'queue_mode <mode>' where <mode>
may be "bio", "rq" or "mq" -- which corresponds to bio-based, request_fn
rq-based, and blk-mq rq-based respectively.

If the queue_mode feature isn't specified the default for the "multipath"
target is still "rq" but if dm_mod.use_blk_mq is set to Y it'll default to
mode "mq".

This new queue_mode feature introduces the ability for each multipath device
to have its own queue_mode (whereas before this feature all multipath devices
effectively had to have the same queue_mode).

This commit also goes a long way to eliminate the awkward (ab)use of
DM_TYPE_*, the associated filter_md_type() and other relatively fragile and
difficult to maintain code.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
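For illustration, a multipath table line exercising the new feature might look
like the sketch below (a hypothetical example: the 1048576-sector length, the
8:16/8:32 path devices and the round-robin selector values are placeholders,
not part of this commit). It declares 2 feature args ("queue_mode bio"),
0 hardware-handler args, and 1 priority group with 2 paths:

    0 1048576 multipath 2 queue_mode bio 0 1 1 round-robin 0 2 1 8:16 1 8:32 1

Omitting the feature keeps the previous behaviour ("rq" by default, or "mq"
when dm_mod.use_blk_mq is set to Y); the two extra feature arguments are why
parse_features() below raises its feature-arg maximum from 6 to 8.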
Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r--  drivers/md/dm-mpath.c | 149
1 file changed, 80 insertions(+), 69 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 2d10ff780d84..7eac080fcb18 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -90,6 +90,8 @@ struct multipath {
 	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
 	atomic_t pg_init_count;		/* Number of times pg_init called */
 
+	unsigned queue_mode;
+
 	/*
 	 * We must use a mempool of dm_mpath_io structs so that we
 	 * can resubmit bios on error.
@@ -131,7 +133,6 @@ static void process_queued_bios(struct work_struct *work);
 #define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
 #define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
 #define MPATHF_PG_INIT_DELAY_RETRY 6	/* Delay pg_init retry? */
-#define MPATHF_BIO_BASED 7			/* Device is bio-based? */
 
 /*-----------------------------------------------
  * Allocation routines
@@ -191,8 +192,7 @@ static void free_priority_group(struct priority_group *pg,
 	kfree(pg);
 }
 
-static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq,
-					 bool bio_based)
+static struct multipath *alloc_multipath(struct dm_target *ti)
 {
 	struct multipath *m;
 
@@ -210,25 +210,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq,
 		mutex_init(&m->work_mutex);
 
 		m->mpio_pool = NULL;
-		if (!use_blk_mq && !bio_based) {
-			unsigned min_ios = dm_get_reserved_rq_based_ios();
-
-			m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
-			if (!m->mpio_pool) {
-				kfree(m);
-				return NULL;
-			}
-		}
-
-		if (bio_based) {
-			INIT_WORK(&m->process_queued_bios, process_queued_bios);
-			set_bit(MPATHF_BIO_BASED, &m->flags);
-			/*
-			 * bio-based doesn't support any direct scsi_dh management;
-			 * it just discovers if a scsi_dh is attached.
-			 */
-			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
-		}
+		m->queue_mode = DM_TYPE_NONE;
 
 		m->ti = ti;
 		ti->private = m;
@@ -237,6 +219,39 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq,
 	return m;
 }
 
+static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
+{
+	if (m->queue_mode == DM_TYPE_NONE) {
+		/*
+		 * Default to request-based.
+		 */
+		if (dm_use_blk_mq(dm_table_get_md(ti->table)))
+			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
+		else
+			m->queue_mode = DM_TYPE_REQUEST_BASED;
+	}
+
+	if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
+		unsigned min_ios = dm_get_reserved_rq_based_ios();
+
+		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
+		if (!m->mpio_pool)
+			return -ENOMEM;
+	}
+	else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+		INIT_WORK(&m->process_queued_bios, process_queued_bios);
+		/*
+		 * bio-based doesn't support any direct scsi_dh management;
+		 * it just discovers if a scsi_dh is attached.
+		 */
+		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+	}
+
+	dm_table_set_type(ti->table, m->queue_mode);
+
+	return 0;
+}
+
 static void free_multipath(struct multipath *m)
 {
 	struct priority_group *pg, *tmp;
@@ -653,7 +668,7 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
 
 static void process_queued_bios_list(struct multipath *m)
 {
-	if (test_bit(MPATHF_BIO_BASED, &m->flags))
+	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		queue_work(kmultipathd, &m->process_queued_bios);
 }
 
@@ -964,7 +979,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 	if (!hw_argc)
 		return 0;
 
-	if (test_bit(MPATHF_BIO_BASED, &m->flags)) {
+	if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		dm_consume_args(as, hw_argc);
 		DMERR("bio-based multipath doesn't allow hardware handler args");
 		return 0;
@@ -1005,7 +1020,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 	const char *arg_name;
 
 	static struct dm_arg _args[] = {
-		{0, 6, "invalid number of feature args"},
+		{0, 8, "invalid number of feature args"},
 		{1, 50, "pg_init_retries must be between 1 and 50"},
 		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
 	};
@@ -1045,6 +1060,24 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 			continue;
 		}
 
+		if (!strcasecmp(arg_name, "queue_mode") &&
+		    (argc >= 1)) {
+			const char *queue_mode_name = dm_shift_arg(as);
+
+			if (!strcasecmp(queue_mode_name, "bio"))
+				m->queue_mode = DM_TYPE_BIO_BASED;
+			else if (!strcasecmp(queue_mode_name, "rq"))
+				m->queue_mode = DM_TYPE_REQUEST_BASED;
+			else if (!strcasecmp(queue_mode_name, "mq"))
+				m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
+			else {
+				ti->error = "Unknown 'queue_mode' requested";
+				r = -EINVAL;
+			}
+			argc--;
+			continue;
+		}
+
 		ti->error = "Unrecognised multipath feature request";
 		r = -EINVAL;
 	} while (argc && !r);
@@ -1052,8 +1085,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 	return r;
 }
 
-static int __multipath_ctr(struct dm_target *ti, unsigned int argc,
-			   char **argv, bool bio_based)
+static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
 	/* target arguments */
 	static struct dm_arg _args[] = {
@@ -1066,12 +1098,11 @@ static int __multipath_ctr(struct dm_target *ti, unsigned int argc,
 	struct dm_arg_set as;
 	unsigned pg_count = 0;
 	unsigned next_pg_num;
-	bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
 
 	as.argc = argc;
 	as.argv = argv;
 
-	m = alloc_multipath(ti, use_blk_mq, bio_based);
+	m = alloc_multipath(ti);
 	if (!m) {
 		ti->error = "can't allocate multipath";
 		return -EINVAL;
@@ -1081,6 +1112,10 @@ static int __multipath_ctr(struct dm_target *ti, unsigned int argc,
 	if (r)
 		goto bad;
 
+	r = alloc_multipath_stage2(ti, m);
+	if (r)
+		goto bad;
+
 	r = parse_hw_handler(&as, m);
 	if (r)
 		goto bad;
@@ -1130,9 +1165,9 @@ static int __multipath_ctr(struct dm_target *ti, unsigned int argc,
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
 	ti->num_write_same_bios = 1;
-	if (bio_based)
+	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
-	else if (use_blk_mq)
+	else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
 
 	return 0;
@@ -1142,16 +1177,6 @@ static int __multipath_ctr(struct dm_target *ti, unsigned int argc,
 	return r;
 }
 
-static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
-{
-	return __multipath_ctr(ti, argc, argv, false);
-}
-
-static int multipath_bio_ctr(struct dm_target *ti, unsigned argc, char **argv)
-{
-	return __multipath_ctr(ti, argc, argv, true);
-}
-
 static void multipath_wait_for_pg_init_completion(struct multipath *m)
 {
 	DECLARE_WAITQUEUE(wait, current);
@@ -1700,7 +1725,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
 			      (m->pg_init_retries > 0) * 2 +
 			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
-			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
+			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
+			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
+
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			DMEMIT("queue_if_no_path ");
 		if (m->pg_init_retries)
@@ -1709,6 +1736,16 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
 		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
 			DMEMIT("retain_attached_hw_handler ");
+		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
+			switch(m->queue_mode) {
+			case DM_TYPE_BIO_BASED:
+				DMEMIT("queue_mode bio ");
+				break;
+			case DM_TYPE_MQ_REQUEST_BASED:
+				DMEMIT("queue_mode mq ");
+				break;
+			}
+		}
 	}
 
 	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
@@ -1995,7 +2032,7 @@ static int multipath_busy(struct dm_target *ti)
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 11, 0},
+	.version = {1, 12, 0},
 	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
@@ -2004,22 +2041,6 @@ static struct target_type multipath_target = {
 	.clone_and_map_rq = multipath_clone_and_map,
 	.release_clone_rq = multipath_release_clone,
 	.rq_end_io = multipath_end_io,
-	.presuspend = multipath_presuspend,
-	.postsuspend = multipath_postsuspend,
-	.resume = multipath_resume,
-	.status = multipath_status,
-	.message = multipath_message,
-	.prepare_ioctl = multipath_prepare_ioctl,
-	.iterate_devices = multipath_iterate_devices,
-	.busy = multipath_busy,
-};
-
-static struct target_type multipath_bio_target = {
-	.name = "multipath-bio",
-	.version = {1, 0, 0},
-	.module = THIS_MODULE,
-	.ctr = multipath_bio_ctr,
-	.dtr = multipath_dtr,
 	.map = multipath_map_bio,
 	.end_io = multipath_end_io_bio,
 	.presuspend = multipath_presuspend,
@@ -2048,13 +2069,6 @@ static int __init dm_multipath_init(void)
 		goto bad_register_target;
 	}
 
-	r = dm_register_target(&multipath_bio_target);
-	if (r < 0) {
-		DMERR("bio-based register failed %d", r);
-		r = -EINVAL;
-		goto bad_register_bio_based_target;
-	}
-
 	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
 	if (!kmultipathd) {
 		DMERR("failed to create workqueue kmpathd");
@@ -2081,8 +2095,6 @@ static int __init dm_multipath_init(void)
 bad_alloc_kmpath_handlerd:
 	destroy_workqueue(kmultipathd);
 bad_alloc_kmultipathd:
-	dm_unregister_target(&multipath_bio_target);
-bad_register_bio_based_target:
 	dm_unregister_target(&multipath_target);
 bad_register_target:
 	kmem_cache_destroy(_mpio_cache);
@@ -2096,7 +2108,6 @@ static void __exit dm_multipath_exit(void)
 	destroy_workqueue(kmultipathd);
 
 	dm_unregister_target(&multipath_target);
-	dm_unregister_target(&multipath_bio_target);
 	kmem_cache_destroy(_mpio_cache);
 }
 