author     Mike Snitzer <snitzer@redhat.com>   2018-10-11 17:44:04 -0400
committer  Mike Snitzer <snitzer@redhat.com>   2018-10-11 17:51:13 -0400
commit     cef6f55a9fb4f6d6f9df0f772aa64cf159997466 (patch)
tree       1af91945c74ac972da4eba3e558fc72ba7bbf770
parent     953923c09fe83255ae11845db1c9eb576ba73df8 (diff)
dm table: require that request-based DM be layered on blk-mq devices
Now that request-based DM (multipath) is blk-mq only, this restriction is required while the legacy request-based IO path still exists.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
 drivers/md/dm-table.c | 18
 1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index eeea32bb6a3e..618edfc3846f 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -908,10 +908,21 @@ static bool dm_table_supports_dax(struct dm_table *t)
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
 
+struct verify_rq_based_data {
+	unsigned sq_count;
+	unsigned mq_count;
+};
+
 static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
 			      sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
+	struct verify_rq_based_data *v = data;
+
+	if (q->mq_ops)
+		v->mq_count++;
+	else
+		v->sq_count++;
 
 	return queue_is_rq_based(q);
 }
@@ -920,6 +931,7 @@ static int dm_table_determine_type(struct dm_table *t)
 {
 	unsigned i;
 	unsigned bio_based = 0, request_based = 0, hybrid = 0;
+	struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
 	struct dm_target *tgt;
 	struct list_head *devices = dm_table_get_devices(t);
 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -1022,10 +1034,14 @@ verify_rq_based:
 
 	/* Non-request-stackable devices can't be used for request-based dm */
 	if (!tgt->type->iterate_devices ||
-	    !tgt->type->iterate_devices(tgt, device_is_rq_based, NULL)) {
+	    !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
 		DMERR("table load rejected: including non-request-stackable devices");
 		return -EINVAL;
 	}
+	if (v.sq_count > 0) {
+		DMERR("table load rejected: not all devices are blk-mq request-stackable");
+		return -EINVAL;
+	}
 
 	return 0;
 }
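
For readers skimming the change: the patch passes a small counter struct through iterate_devices, tallies blk-mq versus legacy (single-queue) request queues among the underlying devices, and rejects the table if any legacy device is found. A minimal userspace sketch of that counting-and-rejecting pattern is below; the struct and device names are illustrative stand-ins, not the kernel code itself.

	/* Userspace sketch (hypothetical names) of the verification pattern:
	 * walk every underlying device, tally blk-mq vs. legacy queues, and
	 * reject the table if any legacy device is present. */
	#include <stdio.h>
	#include <stdbool.h>

	struct verify_rq_based_data {
		unsigned sq_count;	/* legacy single-queue devices seen */
		unsigned mq_count;	/* blk-mq devices seen */
	};

	struct fake_device {
		const char *name;
		bool uses_blk_mq;	/* stand-in for q->mq_ops being set */
	};

	/* Stand-in for the device_is_rq_based callback: count queue types. */
	static void count_queue_type(const struct fake_device *dev,
				     struct verify_rq_based_data *v)
	{
		if (dev->uses_blk_mq)
			v->mq_count++;
		else
			v->sq_count++;
	}

	int main(void)
	{
		struct fake_device devs[] = {
			{ "sda", true },
			{ "sdb", false },	/* legacy request_fn device */
		};
		struct verify_rq_based_data v = { .sq_count = 0, .mq_count = 0 };

		for (unsigned i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
			count_queue_type(&devs[i], &v);

		if (v.sq_count > 0) {
			fprintf(stderr, "table load rejected: not all devices are blk-mq request-stackable\n");
			return 1;
		}
		printf("all %u devices are blk-mq; request-based table accepted\n", v.mq_count);
		return 0;
	}

With all devices blk-mq the sketch accepts the table; flipping any uses_blk_mq to false triggers the same style of rejection message the patch adds to dm_table_determine_type().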