author     Linus Torvalds <torvalds@linux-foundation.org>   2018-10-26 15:57:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-10-26 15:57:38 -0400
commit     71f4d95b23654ec2b347bd15b1260d68ca9ea5ea (patch)
tree       837f9f8f1c361d9cae4aabe5784b9f6f414a171d /drivers/md
parent     6080ad3a9941e4707bb929445b813fadca9a27ff (diff)
parent     da4ad3a23af3d7f357b24b33e9fec7531b59ee49 (diff)
Merge tag 'for-4.20/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:

 - The biggest change this cycle is to remove support for the legacy IO
   path (.request_fn) from request-based DM. Jens has already started
   preparing for complete removal of the legacy IO path in 4.21, but this
   earlier removal of support from DM has been coordinated with Jens (as
   evidenced by the commit being attributed to him). Making request-based
   DM exclusively blk-mq cleans up that portion of DM core quite nicely.

 - Convert the thinp and zoned targets over to using refcount_t where
   applicable.

 - A couple fixes to the DM zoned target for refcounting and other races
   buried in the implementation of metadata block creation and use.

 - Small cleanups to remove redundant unlikely() around a couple of
   WARN_ON_ONCE() calls.

 - Simplify how dm-ioctl copies from userspace, eliminating some
   potential for a malicious user trying to change the executed ioctl
   after its processing has begun.

 - Tweak the DM crypt target to use the DM device name when naming the
   various workqueues created for a particular DM crypt device (makes the
   N workqueues for a DM crypt device more easily understood and enhances
   the user's accounting capabilities at a glance via "ps").

 - Small fixup to remove a dead branch in DM writecache's memory_entry().

* tag 'for-4.20/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm writecache: remove disabled code in memory_entry()
  dm zoned: fix various dmz_get_mblock() issues
  dm zoned: fix metadata block ref counting
  dm raid: avoid bitmap with raid4/5/6 journal device
  dm crypt: make workqueue names device-specific
  dm: add dm_table_device_name()
  dm ioctl: harden copy_params()'s copy_from_user() from malicious users
  dm: remove unnecessary unlikely() around WARN_ON_ONCE()
  dm zoned: target: use refcount_t for dm zoned reference counters
  dm thin: use refcount_t for thin_c reference counting
  dm table: require that request-based DM be layered on blk-mq devices
  dm: rename DM_TYPE_MQ_REQUEST_BASED to DM_TYPE_REQUEST_BASED
  dm: remove legacy request-based IO path
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig                 11
-rw-r--r--  drivers/md/dm-cache-policy-smq.c    2
-rw-r--r--  drivers/md/dm-core.h               10
-rw-r--r--  drivers/md/dm-crypt.c              15
-rw-r--r--  drivers/md/dm-ioctl.c              18
-rw-r--r--  drivers/md/dm-mpath.c              26
-rw-r--r--  drivers/md/dm-raid.c                2
-rw-r--r--  drivers/md/dm-rq.c                316
-rw-r--r--  drivers/md/dm-rq.h                  4
-rw-r--r--  drivers/md/dm-sysfs.c               3
-rw-r--r--  drivers/md/dm-table.c              46
-rw-r--r--  drivers/md/dm-thin.c                8
-rw-r--r--  drivers/md/dm-writecache.c          5
-rw-r--r--  drivers/md/dm-zoned-metadata.c     80
-rw-r--r--  drivers/md/dm-zoned-target.c       20
-rw-r--r--  drivers/md/dm.c                    25
-rw-r--r--  drivers/md/dm.h                     1
17 files changed, 131 insertions, 461 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8b8c123cae66..3db222509e44 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -215,17 +215,6 @@ config BLK_DEV_DM
215 215
216 If unsure, say N. 216 If unsure, say N.
217 217
218config DM_MQ_DEFAULT
219 bool "request-based DM: use blk-mq I/O path by default"
220 depends on BLK_DEV_DM
221 ---help---
222 This option enables the blk-mq based I/O path for request-based
223 DM devices by default. With the option the dm_mod.use_blk_mq
224 module/boot option defaults to Y, without it to N, but it can
225 still be overriden either way.
226
227 If unsure say N.
228
229config DM_DEBUG 218config DM_DEBUG
230 bool "Device mapper debugging support" 219 bool "Device mapper debugging support"
231 depends on BLK_DEV_DM 220 depends on BLK_DEV_DM
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 1b5b9ad9e492..b61aac00ff40 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1200,7 +1200,7 @@ static void queue_demotion(struct smq_policy *mq)
1200 struct policy_work work; 1200 struct policy_work work;
1201 struct entry *e; 1201 struct entry *e;
1202 1202
1203 if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed))) 1203 if (WARN_ON_ONCE(!mq->migrations_allowed))
1204 return; 1204 return;
1205 1205
1206 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); 1206 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 7d480c930eaf..224d44503a06 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -112,18 +112,8 @@ struct mapped_device {
112 112
113 struct dm_stats stats; 113 struct dm_stats stats;
114 114
115 struct kthread_worker kworker;
116 struct task_struct *kworker_task;
117
118 /* for request-based merge heuristic in dm_request_fn() */
119 unsigned seq_rq_merge_deadline_usecs;
120 int last_rq_rw;
121 sector_t last_rq_pos;
122 ktime_t last_rq_start_time;
123
124 /* for blk-mq request-based DM support */ 115 /* for blk-mq request-based DM support */
125 struct blk_mq_tag_set *tag_set; 116 struct blk_mq_tag_set *tag_set;
126 bool use_blk_mq:1;
127 bool init_tio_pdu:1; 117 bool init_tio_pdu:1;
128 118
129 struct srcu_struct io_barrier; 119 struct srcu_struct io_barrier;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0481223b1deb..b8eec515a003 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2661,6 +2661,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
2661static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 2661static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2662{ 2662{
2663 struct crypt_config *cc; 2663 struct crypt_config *cc;
2664 const char *devname = dm_table_device_name(ti->table);
2664 int key_size; 2665 int key_size;
2665 unsigned int align_mask; 2666 unsigned int align_mask;
2666 unsigned long long tmpll; 2667 unsigned long long tmpll;
@@ -2806,18 +2807,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2806 } 2807 }
2807 2808
2808 ret = -ENOMEM; 2809 ret = -ENOMEM;
2809 cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); 2810 cc->io_queue = alloc_workqueue("kcryptd_io/%s",
2811 WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
2812 1, devname);
2810 if (!cc->io_queue) { 2813 if (!cc->io_queue) {
2811 ti->error = "Couldn't create kcryptd io queue"; 2814 ti->error = "Couldn't create kcryptd io queue";
2812 goto bad; 2815 goto bad;
2813 } 2816 }
2814 2817
2815 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) 2818 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
2816 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); 2819 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
2820 WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
2821 1, devname);
2817 else 2822 else
2818 cc->crypt_queue = alloc_workqueue("kcryptd", 2823 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
2819 WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, 2824 WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
2820 num_online_cpus()); 2825 num_online_cpus(), devname);
2821 if (!cc->crypt_queue) { 2826 if (!cc->crypt_queue) {
2822 ti->error = "Couldn't create kcryptd queue"; 2827 ti->error = "Couldn't create kcryptd queue";
2823 goto bad; 2828 goto bad;
@@ -2826,7 +2831,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2826 spin_lock_init(&cc->write_thread_lock); 2831 spin_lock_init(&cc->write_thread_lock);
2827 cc->write_tree = RB_ROOT; 2832 cc->write_tree = RB_ROOT;
2828 2833
2829 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write"); 2834 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
2830 if (IS_ERR(cc->write_thread)) { 2835 if (IS_ERR(cc->write_thread)) {
2831 ret = PTR_ERR(cc->write_thread); 2836 ret = PTR_ERR(cc->write_thread);
2832 cc->write_thread = NULL; 2837 cc->write_thread = NULL;
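[Editor's note: the dm-crypt hunks above rely on alloc_workqueue() and kthread_create() accepting printf-style name formats, so the device name returned by the new dm_table_device_name() helper ends up in the workqueue and thread names ("kcryptd_io/<dev>", "kcryptd/<dev>", "dmcrypt_write/<dev>"). A minimal sketch of that naming pattern follows; all "example_*" identifiers are hypothetical and not part of this patch.]

/*
 * Illustrative sketch only (not from this patch): per-device naming of a
 * workqueue and a kernel thread via the printf-style name arguments.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

struct example_ctx {
	struct workqueue_struct *io_wq;
	struct task_struct *writer;
};

static int example_setup(struct example_ctx *ctx, const char *devname,
			 int (*write_fn)(void *), void *data)
{
	/* Workqueue workers show up as "example_io/<devname>" in ps. */
	ctx->io_wq = alloc_workqueue("example_io/%s",
				     WQ_HIGHPRI | WQ_MEM_RECLAIM, 1, devname);
	if (!ctx->io_wq)
		return -ENOMEM;

	/* The kernel thread shows up as "example_write/<devname>". */
	ctx->writer = kthread_create(write_fn, data, "example_write/%s", devname);
	if (IS_ERR(ctx->writer)) {
		destroy_workqueue(ctx->io_wq);
		ctx->io_wq = NULL;
		return PTR_ERR(ctx->writer);
	}
	wake_up_process(ctx->writer);
	return 0;
}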
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index b810ea77e6b1..f666778ad237 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1720,8 +1720,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
1720} 1720}
1721 1721
1722static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel, 1722static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
1723 int ioctl_flags, 1723 int ioctl_flags, struct dm_ioctl **param, int *param_flags)
1724 struct dm_ioctl **param, int *param_flags)
1725{ 1724{
1726 struct dm_ioctl *dmi; 1725 struct dm_ioctl *dmi;
1727 int secure_data; 1726 int secure_data;
@@ -1762,18 +1761,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
1762 1761
1763 *param_flags |= DM_PARAMS_MALLOC; 1762 *param_flags |= DM_PARAMS_MALLOC;
1764 1763
1765 if (copy_from_user(dmi, user, param_kernel->data_size)) 1764 /* Copy from param_kernel (which was already copied from user) */
1766 goto bad; 1765 memcpy(dmi, param_kernel, minimum_data_size);
1767 1766
1768data_copied: 1767 if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
1769 /* 1768 param_kernel->data_size - minimum_data_size))
1770 * Abort if something changed the ioctl data while it was being copied.
1771 */
1772 if (dmi->data_size != param_kernel->data_size) {
1773 DMERR("rejecting ioctl: data size modified while processing parameters");
1774 goto bad; 1769 goto bad;
1775 } 1770data_copied:
1776
1777 /* Wipe the user buffer so we do not return it to userspace */ 1771 /* Wipe the user buffer so we do not return it to userspace */
1778 if (secure_data && clear_user(user, param_kernel->data_size)) 1772 if (secure_data && clear_user(user, param_kernel->data_size))
1779 goto bad; 1773 goto bad;
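[Editor's note: the copy_params() change above closes a time-of-check/time-of-use window. Instead of re-reading the whole dm_ioctl header from userspace (which a malicious caller could rewrite after validation), it reuses the header already copied into the kernel and only fetches the remaining payload. A hedged sketch of that general pattern follows; the function and parameter names are invented for illustration and the caller is assumed to have validated hdr_size <= total_size.]

/*
 * Illustrative sketch only: reuse the already-validated kernel copy of a
 * header and copy_from_user() just the payload, so userspace cannot swap
 * header fields after validation.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static void *copy_validated_params(void __user *user, const void *hdr_kernel,
				   size_t hdr_size, size_t total_size)
{
	char *buf = kmalloc(total_size, GFP_KERNEL);

	if (!buf)
		return NULL;

	/* Header: trust the copy we already validated, not userspace. */
	memcpy(buf, hdr_kernel, hdr_size);

	/* Payload: the only part that still has to come from userspace. */
	if (copy_from_user(buf + hdr_size, (char __user *)user + hdr_size,
			   total_size - hdr_size)) {
		kfree(buf);
		return NULL;
	}
	return buf;
}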
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 419362c2d8ac..d6a66921daf4 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -203,14 +203,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
203static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) 203static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
204{ 204{
205 if (m->queue_mode == DM_TYPE_NONE) { 205 if (m->queue_mode == DM_TYPE_NONE) {
206 /* 206 m->queue_mode = DM_TYPE_REQUEST_BASED;
207 * Default to request-based.
208 */
209 if (dm_use_blk_mq(dm_table_get_md(ti->table)))
210 m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
211 else
212 m->queue_mode = DM_TYPE_REQUEST_BASED;
213
214 } else if (m->queue_mode == DM_TYPE_BIO_BASED) { 207 } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
215 INIT_WORK(&m->process_queued_bios, process_queued_bios); 208 INIT_WORK(&m->process_queued_bios, process_queued_bios);
216 /* 209 /*
@@ -537,10 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
537 * get the queue busy feedback (via BLK_STS_RESOURCE), 530 * get the queue busy feedback (via BLK_STS_RESOURCE),
538 * otherwise I/O merging can suffer. 531 * otherwise I/O merging can suffer.
539 */ 532 */
540 if (q->mq_ops) 533 return DM_MAPIO_REQUEUE;
541 return DM_MAPIO_REQUEUE;
542 else
543 return DM_MAPIO_DELAY_REQUEUE;
544 } 534 }
545 clone->bio = clone->biotail = NULL; 535 clone->bio = clone->biotail = NULL;
546 clone->rq_disk = bdev->bd_disk; 536 clone->rq_disk = bdev->bd_disk;
@@ -668,7 +658,7 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
668 658
669static void process_queued_io_list(struct multipath *m) 659static void process_queued_io_list(struct multipath *m)
670{ 660{
671 if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) 661 if (m->queue_mode == DM_TYPE_REQUEST_BASED)
672 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); 662 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
673 else if (m->queue_mode == DM_TYPE_BIO_BASED) 663 else if (m->queue_mode == DM_TYPE_BIO_BASED)
674 queue_work(kmultipathd, &m->process_queued_bios); 664 queue_work(kmultipathd, &m->process_queued_bios);
@@ -1089,10 +1079,9 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
1089 1079
1090 if (!strcasecmp(queue_mode_name, "bio")) 1080 if (!strcasecmp(queue_mode_name, "bio"))
1091 m->queue_mode = DM_TYPE_BIO_BASED; 1081 m->queue_mode = DM_TYPE_BIO_BASED;
1092 else if (!strcasecmp(queue_mode_name, "rq")) 1082 else if (!strcasecmp(queue_mode_name, "rq") ||
1083 !strcasecmp(queue_mode_name, "mq"))
1093 m->queue_mode = DM_TYPE_REQUEST_BASED; 1084 m->queue_mode = DM_TYPE_REQUEST_BASED;
1094 else if (!strcasecmp(queue_mode_name, "mq"))
1095 m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
1096 else { 1085 else {
1097 ti->error = "Unknown 'queue_mode' requested"; 1086 ti->error = "Unknown 'queue_mode' requested";
1098 r = -EINVAL; 1087 r = -EINVAL;
@@ -1726,9 +1715,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
1726 case DM_TYPE_BIO_BASED: 1715 case DM_TYPE_BIO_BASED:
1727 DMEMIT("queue_mode bio "); 1716 DMEMIT("queue_mode bio ");
1728 break; 1717 break;
1729 case DM_TYPE_MQ_REQUEST_BASED:
1730 DMEMIT("queue_mode mq ");
1731 break;
1732 default: 1718 default:
1733 WARN_ON_ONCE(true); 1719 WARN_ON_ONCE(true);
1734 break; 1720 break;
@@ -1972,7 +1958,7 @@ static int multipath_busy(struct dm_target *ti)
1972 1958
1973 /* no paths available, for blk-mq: rely on IO mapping to delay requeue */ 1959 /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
1974 if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 1960 if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1975 return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED); 1961 return (m->queue_mode != DM_TYPE_REQUEST_BASED);
1976 1962
1977 /* Guess which priority_group will be used at next mapping time */ 1963 /* Guess which priority_group will be used at next mapping time */
1978 pg = READ_ONCE(m->current_pg); 1964 pg = READ_ONCE(m->current_pg);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c44925e4e481..e1dd1622a290 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2475,7 +2475,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2475 } 2475 }
2476 2476
2477 /* Enable bitmap creation for RAID levels != 0 */ 2477 /* Enable bitmap creation for RAID levels != 0 */
2478 mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096); 2478 mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
2479 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; 2479 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
2480 2480
2481 if (!test_and_clear_bit(FirstUse, &rdev->flags)) { 2481 if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e547b8dd298..7cd36e4d1310 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -23,19 +23,6 @@ static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
23#define RESERVED_REQUEST_BASED_IOS 256 23#define RESERVED_REQUEST_BASED_IOS 256
24static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS; 24static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
25 25
26static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
27
28bool dm_use_blk_mq_default(void)
29{
30 return use_blk_mq;
31}
32
33bool dm_use_blk_mq(struct mapped_device *md)
34{
35 return md->use_blk_mq;
36}
37EXPORT_SYMBOL_GPL(dm_use_blk_mq);
38
39unsigned dm_get_reserved_rq_based_ios(void) 26unsigned dm_get_reserved_rq_based_ios(void)
40{ 27{
41 return __dm_get_module_param(&reserved_rq_based_ios, 28 return __dm_get_module_param(&reserved_rq_based_ios,
@@ -59,41 +46,13 @@ int dm_request_based(struct mapped_device *md)
59 return queue_is_rq_based(md->queue); 46 return queue_is_rq_based(md->queue);
60} 47}
61 48
62static void dm_old_start_queue(struct request_queue *q) 49void dm_start_queue(struct request_queue *q)
63{
64 unsigned long flags;
65
66 spin_lock_irqsave(q->queue_lock, flags);
67 if (blk_queue_stopped(q))
68 blk_start_queue(q);
69 spin_unlock_irqrestore(q->queue_lock, flags);
70}
71
72static void dm_mq_start_queue(struct request_queue *q)
73{ 50{
74 blk_mq_unquiesce_queue(q); 51 blk_mq_unquiesce_queue(q);
75 blk_mq_kick_requeue_list(q); 52 blk_mq_kick_requeue_list(q);
76} 53}
77 54
78void dm_start_queue(struct request_queue *q) 55void dm_stop_queue(struct request_queue *q)
79{
80 if (!q->mq_ops)
81 dm_old_start_queue(q);
82 else
83 dm_mq_start_queue(q);
84}
85
86static void dm_old_stop_queue(struct request_queue *q)
87{
88 unsigned long flags;
89
90 spin_lock_irqsave(q->queue_lock, flags);
91 if (!blk_queue_stopped(q))
92 blk_stop_queue(q);
93 spin_unlock_irqrestore(q->queue_lock, flags);
94}
95
96static void dm_mq_stop_queue(struct request_queue *q)
97{ 56{
98 if (blk_mq_queue_stopped(q)) 57 if (blk_mq_queue_stopped(q))
99 return; 58 return;
@@ -101,14 +60,6 @@ static void dm_mq_stop_queue(struct request_queue *q)
101 blk_mq_quiesce_queue(q); 60 blk_mq_quiesce_queue(q);
102} 61}
103 62
104void dm_stop_queue(struct request_queue *q)
105{
106 if (!q->mq_ops)
107 dm_old_stop_queue(q);
108 else
109 dm_mq_stop_queue(q);
110}
111
112/* 63/*
113 * Partial completion handling for request-based dm 64 * Partial completion handling for request-based dm
114 */ 65 */
@@ -179,9 +130,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
179 */ 130 */
180static void rq_completed(struct mapped_device *md, int rw, bool run_queue) 131static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
181{ 132{
182 struct request_queue *q = md->queue;
183 unsigned long flags;
184
185 atomic_dec(&md->pending[rw]); 133 atomic_dec(&md->pending[rw]);
186 134
187 /* nudge anyone waiting on suspend queue */ 135 /* nudge anyone waiting on suspend queue */
@@ -189,18 +137,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
189 wake_up(&md->wait); 137 wake_up(&md->wait);
190 138
191 /* 139 /*
192 * Run this off this callpath, as drivers could invoke end_io while
193 * inside their request_fn (and holding the queue lock). Calling
194 * back into ->request_fn() could deadlock attempting to grab the
195 * queue lock again.
196 */
197 if (!q->mq_ops && run_queue) {
198 spin_lock_irqsave(q->queue_lock, flags);
199 blk_run_queue_async(q);
200 spin_unlock_irqrestore(q->queue_lock, flags);
201 }
202
203 /*
204 * dm_put() must be at the end of this function. See the comment above 140 * dm_put() must be at the end of this function. See the comment above
205 */ 141 */
206 dm_put(md); 142 dm_put(md);
@@ -222,27 +158,10 @@ static void dm_end_request(struct request *clone, blk_status_t error)
222 tio->ti->type->release_clone_rq(clone); 158 tio->ti->type->release_clone_rq(clone);
223 159
224 rq_end_stats(md, rq); 160 rq_end_stats(md, rq);
225 if (!rq->q->mq_ops) 161 blk_mq_end_request(rq, error);
226 blk_end_request_all(rq, error);
227 else
228 blk_mq_end_request(rq, error);
229 rq_completed(md, rw, true); 162 rq_completed(md, rw, true);
230} 163}
231 164
232/*
233 * Requeue the original request of a clone.
234 */
235static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
236{
237 struct request_queue *q = rq->q;
238 unsigned long flags;
239
240 spin_lock_irqsave(q->queue_lock, flags);
241 blk_requeue_request(q, rq);
242 blk_delay_queue(q, delay_ms);
243 spin_unlock_irqrestore(q->queue_lock, flags);
244}
245
246static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs) 165static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
247{ 166{
248 blk_mq_delay_kick_requeue_list(q, msecs); 167 blk_mq_delay_kick_requeue_list(q, msecs);
@@ -273,11 +192,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
273 tio->ti->type->release_clone_rq(tio->clone); 192 tio->ti->type->release_clone_rq(tio->clone);
274 } 193 }
275 194
276 if (!rq->q->mq_ops) 195 dm_mq_delay_requeue_request(rq, delay_ms);
277 dm_old_requeue_request(rq, delay_ms);
278 else
279 dm_mq_delay_requeue_request(rq, delay_ms);
280
281 rq_completed(md, rw, false); 196 rq_completed(md, rw, false);
282} 197}
283 198
@@ -340,10 +255,7 @@ static void dm_softirq_done(struct request *rq)
340 255
341 rq_end_stats(md, rq); 256 rq_end_stats(md, rq);
342 rw = rq_data_dir(rq); 257 rw = rq_data_dir(rq);
343 if (!rq->q->mq_ops) 258 blk_mq_end_request(rq, tio->error);
344 blk_end_request_all(rq, tio->error);
345 else
346 blk_mq_end_request(rq, tio->error);
347 rq_completed(md, rw, false); 259 rq_completed(md, rw, false);
348 return; 260 return;
349 } 261 }
@@ -363,17 +275,14 @@ static void dm_complete_request(struct request *rq, blk_status_t error)
363 struct dm_rq_target_io *tio = tio_from_request(rq); 275 struct dm_rq_target_io *tio = tio_from_request(rq);
364 276
365 tio->error = error; 277 tio->error = error;
366 if (!rq->q->mq_ops) 278 blk_mq_complete_request(rq);
367 blk_complete_request(rq);
368 else
369 blk_mq_complete_request(rq);
370} 279}
371 280
372/* 281/*
373 * Complete the not-mapped clone and the original request with the error status 282 * Complete the not-mapped clone and the original request with the error status
374 * through softirq context. 283 * through softirq context.
375 * Target's rq_end_io() function isn't called. 284 * Target's rq_end_io() function isn't called.
376 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. 285 * This may be used when the target's clone_and_map_rq() function fails.
377 */ 286 */
378static void dm_kill_unmapped_request(struct request *rq, blk_status_t error) 287static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
379{ 288{
@@ -381,21 +290,10 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
381 dm_complete_request(rq, error); 290 dm_complete_request(rq, error);
382} 291}
383 292
384/*
385 * Called with the clone's queue lock held (in the case of .request_fn)
386 */
387static void end_clone_request(struct request *clone, blk_status_t error) 293static void end_clone_request(struct request *clone, blk_status_t error)
388{ 294{
389 struct dm_rq_target_io *tio = clone->end_io_data; 295 struct dm_rq_target_io *tio = clone->end_io_data;
390 296
391 /*
392 * Actual request completion is done in a softirq context which doesn't
393 * hold the clone's queue lock. Otherwise, deadlock could occur because:
394 * - another request may be submitted by the upper level driver
395 * of the stacking during the completion
396 * - the submission which requires queue lock may be done
397 * against this clone's queue
398 */
399 dm_complete_request(tio->orig, error); 297 dm_complete_request(tio->orig, error);
400} 298}
401 299
@@ -446,8 +344,6 @@ static int setup_clone(struct request *clone, struct request *rq,
446 return 0; 344 return 0;
447} 345}
448 346
449static void map_tio_request(struct kthread_work *work);
450
451static void init_tio(struct dm_rq_target_io *tio, struct request *rq, 347static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
452 struct mapped_device *md) 348 struct mapped_device *md)
453{ 349{
@@ -464,8 +360,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
464 */ 360 */
465 if (!md->init_tio_pdu) 361 if (!md->init_tio_pdu)
466 memset(&tio->info, 0, sizeof(tio->info)); 362 memset(&tio->info, 0, sizeof(tio->info));
467 if (md->kworker_task)
468 kthread_init_work(&tio->work, map_tio_request);
469} 363}
470 364
471/* 365/*
@@ -504,10 +398,7 @@ check_again:
504 blk_rq_unprep_clone(clone); 398 blk_rq_unprep_clone(clone);
505 tio->ti->type->release_clone_rq(clone); 399 tio->ti->type->release_clone_rq(clone);
506 tio->clone = NULL; 400 tio->clone = NULL;
507 if (!rq->q->mq_ops) 401 r = DM_MAPIO_REQUEUE;
508 r = DM_MAPIO_DELAY_REQUEUE;
509 else
510 r = DM_MAPIO_REQUEUE;
511 goto check_again; 402 goto check_again;
512 } 403 }
513 break; 404 break;
@@ -530,20 +421,23 @@ check_again:
530 return r; 421 return r;
531} 422}
532 423
424/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
425ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
426{
427 return sprintf(buf, "%u\n", 0);
428}
429
430ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
431 const char *buf, size_t count)
432{
433 return count;
434}
435
533static void dm_start_request(struct mapped_device *md, struct request *orig) 436static void dm_start_request(struct mapped_device *md, struct request *orig)
534{ 437{
535 if (!orig->q->mq_ops) 438 blk_mq_start_request(orig);
536 blk_start_request(orig);
537 else
538 blk_mq_start_request(orig);
539 atomic_inc(&md->pending[rq_data_dir(orig)]); 439 atomic_inc(&md->pending[rq_data_dir(orig)]);
540 440
541 if (md->seq_rq_merge_deadline_usecs) {
542 md->last_rq_pos = rq_end_sector(orig);
543 md->last_rq_rw = rq_data_dir(orig);
544 md->last_rq_start_time = ktime_get();
545 }
546
547 if (unlikely(dm_stats_used(&md->stats))) { 441 if (unlikely(dm_stats_used(&md->stats))) {
548 struct dm_rq_target_io *tio = tio_from_request(orig); 442 struct dm_rq_target_io *tio = tio_from_request(orig);
549 tio->duration_jiffies = jiffies; 443 tio->duration_jiffies = jiffies;
@@ -563,8 +457,10 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
563 dm_get(md); 457 dm_get(md);
564} 458}
565 459
566static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq) 460static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
461 unsigned int hctx_idx, unsigned int numa_node)
567{ 462{
463 struct mapped_device *md = set->driver_data;
568 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 464 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
569 465
570 /* 466 /*
@@ -581,163 +477,6 @@ static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
581 return 0; 477 return 0;
582} 478}
583 479
584static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
585{
586 return __dm_rq_init_rq(q->rq_alloc_data, rq);
587}
588
589static void map_tio_request(struct kthread_work *work)
590{
591 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
592
593 if (map_request(tio) == DM_MAPIO_REQUEUE)
594 dm_requeue_original_request(tio, false);
595}
596
597ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
598{
599 return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
600}
601
602#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
603
604ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
605 const char *buf, size_t count)
606{
607 unsigned deadline;
608
609 if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
610 return count;
611
612 if (kstrtouint(buf, 10, &deadline))
613 return -EINVAL;
614
615 if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
616 deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
617
618 md->seq_rq_merge_deadline_usecs = deadline;
619
620 return count;
621}
622
623static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
624{
625 ktime_t kt_deadline;
626
627 if (!md->seq_rq_merge_deadline_usecs)
628 return false;
629
630 kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
631 kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
632
633 return !ktime_after(ktime_get(), kt_deadline);
634}
635
636/*
637 * q->request_fn for old request-based dm.
638 * Called with the queue lock held.
639 */
640static void dm_old_request_fn(struct request_queue *q)
641{
642 struct mapped_device *md = q->queuedata;
643 struct dm_target *ti = md->immutable_target;
644 struct request *rq;
645 struct dm_rq_target_io *tio;
646 sector_t pos = 0;
647
648 if (unlikely(!ti)) {
649 int srcu_idx;
650 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
651
652 if (unlikely(!map)) {
653 dm_put_live_table(md, srcu_idx);
654 return;
655 }
656 ti = dm_table_find_target(map, pos);
657 dm_put_live_table(md, srcu_idx);
658 }
659
660 /*
661 * For suspend, check blk_queue_stopped() and increment
662 * ->pending within a single queue_lock not to increment the
663 * number of in-flight I/Os after the queue is stopped in
664 * dm_suspend().
665 */
666 while (!blk_queue_stopped(q)) {
667 rq = blk_peek_request(q);
668 if (!rq)
669 return;
670
671 /* always use block 0 to find the target for flushes for now */
672 pos = 0;
673 if (req_op(rq) != REQ_OP_FLUSH)
674 pos = blk_rq_pos(rq);
675
676 if ((dm_old_request_peeked_before_merge_deadline(md) &&
677 md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
678 md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
679 (ti->type->busy && ti->type->busy(ti))) {
680 blk_delay_queue(q, 10);
681 return;
682 }
683
684 dm_start_request(md, rq);
685
686 tio = tio_from_request(rq);
687 init_tio(tio, rq, md);
688 /* Establish tio->ti before queuing work (map_tio_request) */
689 tio->ti = ti;
690 kthread_queue_work(&md->kworker, &tio->work);
691 BUG_ON(!irqs_disabled());
692 }
693}
694
695/*
696 * Fully initialize a .request_fn request-based queue.
697 */
698int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
699{
700 struct dm_target *immutable_tgt;
701
702 /* Fully initialize the queue */
703 md->queue->cmd_size = sizeof(struct dm_rq_target_io);
704 md->queue->rq_alloc_data = md;
705 md->queue->request_fn = dm_old_request_fn;
706 md->queue->init_rq_fn = dm_rq_init_rq;
707
708 immutable_tgt = dm_table_get_immutable_target(t);
709 if (immutable_tgt && immutable_tgt->per_io_data_size) {
710 /* any target-specific per-io data is immediately after the tio */
711 md->queue->cmd_size += immutable_tgt->per_io_data_size;
712 md->init_tio_pdu = true;
713 }
714 if (blk_init_allocated_queue(md->queue) < 0)
715 return -EINVAL;
716
717 /* disable dm_old_request_fn's merge heuristic by default */
718 md->seq_rq_merge_deadline_usecs = 0;
719
720 blk_queue_softirq_done(md->queue, dm_softirq_done);
721
722 /* Initialize the request-based DM worker thread */
723 kthread_init_worker(&md->kworker);
724 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
725 "kdmwork-%s", dm_device_name(md));
726 if (IS_ERR(md->kworker_task)) {
727 int error = PTR_ERR(md->kworker_task);
728 md->kworker_task = NULL;
729 return error;
730 }
731
732 return 0;
733}
734
735static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
736 unsigned int hctx_idx, unsigned int numa_node)
737{
738 return __dm_rq_init_rq(set->driver_data, rq);
739}
740
741static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 480static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
742 const struct blk_mq_queue_data *bd) 481 const struct blk_mq_queue_data *bd)
743{ 482{
@@ -790,11 +529,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
790 struct dm_target *immutable_tgt; 529 struct dm_target *immutable_tgt;
791 int err; 530 int err;
792 531
793 if (!dm_table_all_blk_mq_devices(t)) {
794 DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
795 return -EINVAL;
796 }
797
798 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id); 532 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
799 if (!md->tag_set) 533 if (!md->tag_set)
800 return -ENOMEM; 534 return -ENOMEM;
@@ -845,6 +579,8 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
845module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR); 579module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
846MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"); 580MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
847 581
582/* Unused, but preserved for userspace compatibility */
583static bool use_blk_mq = true;
848module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR); 584module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
849MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices"); 585MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
850 586
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index f43c45460aac..b39245545229 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -46,10 +46,6 @@ struct dm_rq_clone_bio_info {
46 struct bio clone; 46 struct bio clone;
47}; 47};
48 48
49bool dm_use_blk_mq_default(void);
50bool dm_use_blk_mq(struct mapped_device *md);
51
52int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
53int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t); 49int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
54void dm_mq_cleanup_mapped_device(struct mapped_device *md); 50void dm_mq_cleanup_mapped_device(struct mapped_device *md);
55 51
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index c209b8a19b84..a05fcd50e1b9 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -92,7 +92,8 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
92 92
93static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf) 93static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
94{ 94{
95 sprintf(buf, "%d\n", dm_use_blk_mq(md)); 95 /* Purely for userspace compatibility */
96 sprintf(buf, "%d\n", true);
96 97
97 return strlen(buf); 98 return strlen(buf);
98} 99}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index fb4bea20657b..9038c302d5c2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -47,7 +47,6 @@ struct dm_table {
47 47
48 bool integrity_supported:1; 48 bool integrity_supported:1;
49 bool singleton:1; 49 bool singleton:1;
50 bool all_blk_mq:1;
51 unsigned integrity_added:1; 50 unsigned integrity_added:1;
52 51
53 /* 52 /*
@@ -872,8 +871,7 @@ static bool __table_type_bio_based(enum dm_queue_mode table_type)
872 871
873static bool __table_type_request_based(enum dm_queue_mode table_type) 872static bool __table_type_request_based(enum dm_queue_mode table_type)
874{ 873{
875 return (table_type == DM_TYPE_REQUEST_BASED || 874 return table_type == DM_TYPE_REQUEST_BASED;
876 table_type == DM_TYPE_MQ_REQUEST_BASED);
877} 875}
878 876
879void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) 877void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
@@ -999,10 +997,6 @@ verify_bio_based:
999 997
1000 BUG_ON(!request_based); /* No targets in this table */ 998 BUG_ON(!request_based); /* No targets in this table */
1001 999
1002 /*
1003 * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
1004 * having a compatible target use dm_table_set_type.
1005 */
1006 t->type = DM_TYPE_REQUEST_BASED; 1000 t->type = DM_TYPE_REQUEST_BASED;
1007 1001
1008verify_rq_based: 1002verify_rq_based:
@@ -1022,11 +1016,9 @@ verify_rq_based:
1022 int srcu_idx; 1016 int srcu_idx;
1023 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx); 1017 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
1024 1018
1025 /* inherit live table's type and all_blk_mq */ 1019 /* inherit live table's type */
1026 if (live_table) { 1020 if (live_table)
1027 t->type = live_table->type; 1021 t->type = live_table->type;
1028 t->all_blk_mq = live_table->all_blk_mq;
1029 }
1030 dm_put_live_table(t->md, srcu_idx); 1022 dm_put_live_table(t->md, srcu_idx);
1031 return 0; 1023 return 0;
1032 } 1024 }
@@ -1046,17 +1038,10 @@ verify_rq_based:
1046 DMERR("table load rejected: including non-request-stackable devices"); 1038 DMERR("table load rejected: including non-request-stackable devices");
1047 return -EINVAL; 1039 return -EINVAL;
1048 } 1040 }
1049 if (v.sq_count && v.mq_count) { 1041 if (v.sq_count > 0) {
1050 DMERR("table load rejected: not all devices are blk-mq request-stackable"); 1042 DMERR("table load rejected: not all devices are blk-mq request-stackable");
1051 return -EINVAL; 1043 return -EINVAL;
1052 } 1044 }
1053 t->all_blk_mq = v.mq_count > 0;
1054
1055 if (!t->all_blk_mq &&
1056 (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {
1057 DMERR("table load rejected: all devices are not blk-mq request-stackable");
1058 return -EINVAL;
1059 }
1060 1045
1061 return 0; 1046 return 0;
1062} 1047}
@@ -1105,11 +1090,6 @@ bool dm_table_request_based(struct dm_table *t)
1105 return __table_type_request_based(dm_table_get_type(t)); 1090 return __table_type_request_based(dm_table_get_type(t));
1106} 1091}
1107 1092
1108bool dm_table_all_blk_mq_devices(struct dm_table *t)
1109{
1110 return t->all_blk_mq;
1111}
1112
1113static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) 1093static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1114{ 1094{
1115 enum dm_queue_mode type = dm_table_get_type(t); 1095 enum dm_queue_mode type = dm_table_get_type(t);
@@ -2089,26 +2069,24 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
2089} 2069}
2090EXPORT_SYMBOL(dm_table_get_md); 2070EXPORT_SYMBOL(dm_table_get_md);
2091 2071
2072const char *dm_table_device_name(struct dm_table *t)
2073{
2074 return dm_device_name(t->md);
2075}
2076EXPORT_SYMBOL_GPL(dm_table_device_name);
2077
2092void dm_table_run_md_queue_async(struct dm_table *t) 2078void dm_table_run_md_queue_async(struct dm_table *t)
2093{ 2079{
2094 struct mapped_device *md; 2080 struct mapped_device *md;
2095 struct request_queue *queue; 2081 struct request_queue *queue;
2096 unsigned long flags;
2097 2082
2098 if (!dm_table_request_based(t)) 2083 if (!dm_table_request_based(t))
2099 return; 2084 return;
2100 2085
2101 md = dm_table_get_md(t); 2086 md = dm_table_get_md(t);
2102 queue = dm_get_md_queue(md); 2087 queue = dm_get_md_queue(md);
2103 if (queue) { 2088 if (queue)
2104 if (queue->mq_ops) 2089 blk_mq_run_hw_queues(queue, true);
2105 blk_mq_run_hw_queues(queue, true);
2106 else {
2107 spin_lock_irqsave(queue->queue_lock, flags);
2108 blk_run_queue_async(queue);
2109 spin_unlock_irqrestore(queue->queue_lock, flags);
2110 }
2111 }
2112} 2090}
2113EXPORT_SYMBOL(dm_table_run_md_queue_async); 2091EXPORT_SYMBOL(dm_table_run_md_queue_async);
2114 2092
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index aaf1ad481ee8..0bd8d498b3b9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -325,7 +325,7 @@ struct thin_c {
325 * Ensures the thin is not destroyed until the worker has finished 325 * Ensures the thin is not destroyed until the worker has finished
326 * iterating the active_thins list. 326 * iterating the active_thins list.
327 */ 327 */
328 atomic_t refcount; 328 refcount_t refcount;
329 struct completion can_destroy; 329 struct completion can_destroy;
330}; 330};
331 331
@@ -4044,12 +4044,12 @@ static struct target_type pool_target = {
4044 *--------------------------------------------------------------*/ 4044 *--------------------------------------------------------------*/
4045static void thin_get(struct thin_c *tc) 4045static void thin_get(struct thin_c *tc)
4046{ 4046{
4047 atomic_inc(&tc->refcount); 4047 refcount_inc(&tc->refcount);
4048} 4048}
4049 4049
4050static void thin_put(struct thin_c *tc) 4050static void thin_put(struct thin_c *tc)
4051{ 4051{
4052 if (atomic_dec_and_test(&tc->refcount)) 4052 if (refcount_dec_and_test(&tc->refcount))
4053 complete(&tc->can_destroy); 4053 complete(&tc->can_destroy);
4054} 4054}
4055 4055
@@ -4193,7 +4193,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
4193 r = -EINVAL; 4193 r = -EINVAL;
4194 goto bad; 4194 goto bad;
4195 } 4195 }
4196 atomic_set(&tc->refcount, 1); 4196 refcount_set(&tc->refcount, 1);
4197 init_completion(&tc->can_destroy); 4197 init_completion(&tc->can_destroy);
4198 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); 4198 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
4199 spin_unlock_irqrestore(&tc->pool->lock, flags); 4199 spin_unlock_irqrestore(&tc->pool->lock, flags);
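[Editor's note: the refcount_t conversions in this pull (thin_c above, the dm-zoned bio and chunk-work contexts below) follow the same get/put/wait-for-teardown lifecycle, with refcount_t adding saturation and WARN-on-misuse semantics over a bare atomic_t. A small illustrative sketch using a made-up example_obj, not the real thin_c:]

#include <linux/completion.h>
#include <linux/refcount.h>

/* Hypothetical object with the same lifecycle shape as thin_c above. */
struct example_obj {
	refcount_t refcount;
	struct completion can_destroy;
};

static void example_init(struct example_obj *obj)
{
	refcount_set(&obj->refcount, 1);	/* creator holds the first ref */
	init_completion(&obj->can_destroy);
}

static void example_get(struct example_obj *obj)
{
	refcount_inc(&obj->refcount);		/* WARNs instead of silently overflowing */
}

static void example_put(struct example_obj *obj)
{
	/* The last put signals that teardown may proceed. */
	if (refcount_dec_and_test(&obj->refcount))
		complete(&obj->can_destroy);
}

static void example_destroy(struct example_obj *obj)
{
	example_put(obj);			/* drop the creator's reference */
	wait_for_completion(&obj->can_destroy);
	/* safe to free obj here */
}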
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5f1f80d424dd..2d50eec94cd7 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -350,10 +350,7 @@ static struct wc_memory_superblock *sb(struct dm_writecache *wc)
350 350
351static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e) 351static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
352{ 352{
353 if (is_power_of_2(sizeof(struct wc_entry)) && 0) 353 return &sb(wc)->entries[e->index];
354 return &sb(wc)->entries[e - wc->entries];
355 else
356 return &sb(wc)->entries[e->index];
357} 354}
358 355
359static void *memory_data(struct dm_writecache *wc, struct wc_entry *e) 356static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 969954915566..fa68336560c3 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -99,7 +99,7 @@ struct dmz_mblock {
99 struct rb_node node; 99 struct rb_node node;
100 struct list_head link; 100 struct list_head link;
101 sector_t no; 101 sector_t no;
102 atomic_t ref; 102 unsigned int ref;
103 unsigned long state; 103 unsigned long state;
104 struct page *page; 104 struct page *page;
105 void *data; 105 void *data;
@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
296 296
297 RB_CLEAR_NODE(&mblk->node); 297 RB_CLEAR_NODE(&mblk->node);
298 INIT_LIST_HEAD(&mblk->link); 298 INIT_LIST_HEAD(&mblk->link);
299 atomic_set(&mblk->ref, 0); 299 mblk->ref = 0;
300 mblk->state = 0; 300 mblk->state = 0;
301 mblk->no = mblk_no; 301 mblk->no = mblk_no;
302 mblk->data = page_address(mblk->page); 302 mblk->data = page_address(mblk->page);
@@ -339,10 +339,11 @@ static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
339} 339}
340 340
341/* 341/*
342 * Lookup a metadata block in the rbtree. 342 * Lookup a metadata block in the rbtree. If the block is found, increment
343 * its reference count.
343 */ 344 */
344static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd, 345static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
345 sector_t mblk_no) 346 sector_t mblk_no)
346{ 347{
347 struct rb_root *root = &zmd->mblk_rbtree; 348 struct rb_root *root = &zmd->mblk_rbtree;
348 struct rb_node *node = root->rb_node; 349 struct rb_node *node = root->rb_node;
@@ -350,8 +351,17 @@ static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
350 351
351 while (node) { 352 while (node) {
352 mblk = container_of(node, struct dmz_mblock, node); 353 mblk = container_of(node, struct dmz_mblock, node);
353 if (mblk->no == mblk_no) 354 if (mblk->no == mblk_no) {
355 /*
356 * If this is the first reference to the block,
357 * remove it from the LRU list.
358 */
359 mblk->ref++;
360 if (mblk->ref == 1 &&
361 !test_bit(DMZ_META_DIRTY, &mblk->state))
362 list_del_init(&mblk->link);
354 return mblk; 363 return mblk;
364 }
355 node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right; 365 node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
356 } 366 }
357 367
@@ -382,32 +392,47 @@ static void dmz_mblock_bio_end_io(struct bio *bio)
382} 392}
383 393
384/* 394/*
385 * Read a metadata block from disk. 395 * Read an uncached metadata block from disk and add it to the cache.
386 */ 396 */
387static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd, 397static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
388 sector_t mblk_no) 398 sector_t mblk_no)
389{ 399{
390 struct dmz_mblock *mblk; 400 struct dmz_mblock *mblk, *m;
391 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; 401 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
392 struct bio *bio; 402 struct bio *bio;
393 403
394 /* Get block and insert it */ 404 /* Get a new block and a BIO to read it */
395 mblk = dmz_alloc_mblock(zmd, mblk_no); 405 mblk = dmz_alloc_mblock(zmd, mblk_no);
396 if (!mblk) 406 if (!mblk)
397 return NULL; 407 return NULL;
398 408
399 spin_lock(&zmd->mblk_lock);
400 atomic_inc(&mblk->ref);
401 set_bit(DMZ_META_READING, &mblk->state);
402 dmz_insert_mblock(zmd, mblk);
403 spin_unlock(&zmd->mblk_lock);
404
405 bio = bio_alloc(GFP_NOIO, 1); 409 bio = bio_alloc(GFP_NOIO, 1);
406 if (!bio) { 410 if (!bio) {
407 dmz_free_mblock(zmd, mblk); 411 dmz_free_mblock(zmd, mblk);
408 return NULL; 412 return NULL;
409 } 413 }
410 414
415 spin_lock(&zmd->mblk_lock);
416
417 /*
418 * Make sure that another context did not start reading
419 * the block already.
420 */
421 m = dmz_get_mblock_fast(zmd, mblk_no);
422 if (m) {
423 spin_unlock(&zmd->mblk_lock);
424 dmz_free_mblock(zmd, mblk);
425 bio_put(bio);
426 return m;
427 }
428
429 mblk->ref++;
430 set_bit(DMZ_META_READING, &mblk->state);
431 dmz_insert_mblock(zmd, mblk);
432
433 spin_unlock(&zmd->mblk_lock);
434
435 /* Submit read BIO */
411 bio->bi_iter.bi_sector = dmz_blk2sect(block); 436 bio->bi_iter.bi_sector = dmz_blk2sect(block);
412 bio_set_dev(bio, zmd->dev->bdev); 437 bio_set_dev(bio, zmd->dev->bdev);
413 bio->bi_private = mblk; 438 bio->bi_private = mblk;
@@ -484,7 +509,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,
484 509
485 spin_lock(&zmd->mblk_lock); 510 spin_lock(&zmd->mblk_lock);
486 511
487 if (atomic_dec_and_test(&mblk->ref)) { 512 mblk->ref--;
513 if (mblk->ref == 0) {
488 if (test_bit(DMZ_META_ERROR, &mblk->state)) { 514 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
489 rb_erase(&mblk->node, &zmd->mblk_rbtree); 515 rb_erase(&mblk->node, &zmd->mblk_rbtree);
490 dmz_free_mblock(zmd, mblk); 516 dmz_free_mblock(zmd, mblk);
@@ -508,18 +534,12 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
508 534
509 /* Check rbtree */ 535 /* Check rbtree */
510 spin_lock(&zmd->mblk_lock); 536 spin_lock(&zmd->mblk_lock);
511 mblk = dmz_lookup_mblock(zmd, mblk_no); 537 mblk = dmz_get_mblock_fast(zmd, mblk_no);
512 if (mblk) {
513 /* Cache hit: remove block from LRU list */
514 if (atomic_inc_return(&mblk->ref) == 1 &&
515 !test_bit(DMZ_META_DIRTY, &mblk->state))
516 list_del_init(&mblk->link);
517 }
518 spin_unlock(&zmd->mblk_lock); 538 spin_unlock(&zmd->mblk_lock);
519 539
520 if (!mblk) { 540 if (!mblk) {
521 /* Cache miss: read the block from disk */ 541 /* Cache miss: read the block from disk */
522 mblk = dmz_fetch_mblock(zmd, mblk_no); 542 mblk = dmz_get_mblock_slow(zmd, mblk_no);
523 if (!mblk) 543 if (!mblk)
524 return ERR_PTR(-ENOMEM); 544 return ERR_PTR(-ENOMEM);
525 } 545 }
@@ -753,7 +773,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
753 773
754 spin_lock(&zmd->mblk_lock); 774 spin_lock(&zmd->mblk_lock);
755 clear_bit(DMZ_META_DIRTY, &mblk->state); 775 clear_bit(DMZ_META_DIRTY, &mblk->state);
756 if (atomic_read(&mblk->ref) == 0) 776 if (mblk->ref == 0)
757 list_add_tail(&mblk->link, &zmd->mblk_lru_list); 777 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
758 spin_unlock(&zmd->mblk_lock); 778 spin_unlock(&zmd->mblk_lock);
759 } 779 }
@@ -2308,7 +2328,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
2308 mblk = list_first_entry(&zmd->mblk_dirty_list, 2328 mblk = list_first_entry(&zmd->mblk_dirty_list,
2309 struct dmz_mblock, link); 2329 struct dmz_mblock, link);
2310 dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)", 2330 dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
2311 (u64)mblk->no, atomic_read(&mblk->ref)); 2331 (u64)mblk->no, mblk->ref);
2312 list_del_init(&mblk->link); 2332 list_del_init(&mblk->link);
2313 rb_erase(&mblk->node, &zmd->mblk_rbtree); 2333 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2314 dmz_free_mblock(zmd, mblk); 2334 dmz_free_mblock(zmd, mblk);
@@ -2326,8 +2346,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
2326 root = &zmd->mblk_rbtree; 2346 root = &zmd->mblk_rbtree;
2327 rbtree_postorder_for_each_entry_safe(mblk, next, root, node) { 2347 rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
2328 dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree", 2348 dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
2329 (u64)mblk->no, atomic_read(&mblk->ref)); 2349 (u64)mblk->no, mblk->ref);
2330 atomic_set(&mblk->ref, 0); 2350 mblk->ref = 0;
2331 dmz_free_mblock(zmd, mblk); 2351 dmz_free_mblock(zmd, mblk);
2332 } 2352 }
2333 2353
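[Editor's note: the dmz_get_mblock_slow() rework above allocates the new block outside the spinlock and re-checks the rbtree (via dmz_get_mblock_fast()) before inserting, which is also why a plain unsigned int reference count suffices: every access now happens under zmd->mblk_lock. A hedged, self-contained sketch of that allocate-then-recheck pattern follows, using a hypothetical mini_cache rather than the real dm-zoned structures.]

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical miniature cache; caller initializes lock and entries. */
struct mini_cache {
	spinlock_t lock;
	struct list_head entries;
};

struct mini_entry {
	struct list_head link;
	unsigned long key;
	unsigned int ref;	/* no atomics needed: only touched under lock */
};

static struct mini_entry *mini_lookup(struct mini_cache *c, unsigned long key)
{
	struct mini_entry *e;

	list_for_each_entry(e, &c->entries, link) {
		if (e->key == key) {
			e->ref++;
			return e;
		}
	}
	return NULL;
}

static struct mini_entry *mini_get_or_create(struct mini_cache *c, unsigned long key)
{
	struct mini_entry *e, *race;

	e = kzalloc(sizeof(*e), GFP_NOIO);	/* may sleep, so done unlocked */
	if (!e)
		return NULL;
	e->key = key;

	spin_lock(&c->lock);
	race = mini_lookup(c, key);		/* did another context beat us? */
	if (race) {
		spin_unlock(&c->lock);
		kfree(e);			/* lost the race: discard our copy */
		return race;
	}
	e->ref = 1;
	list_add(&e->link, &c->entries);
	spin_unlock(&c->lock);
	return e;
}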
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 12d96a263623..981154e59461 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -19,7 +19,7 @@ struct dmz_bioctx {
19 struct dmz_target *target; 19 struct dmz_target *target;
20 struct dm_zone *zone; 20 struct dm_zone *zone;
21 struct bio *bio; 21 struct bio *bio;
22 atomic_t ref; 22 refcount_t ref;
23 blk_status_t status; 23 blk_status_t status;
24}; 24};
25 25
@@ -28,7 +28,7 @@ struct dmz_bioctx {
28 */ 28 */
29struct dm_chunk_work { 29struct dm_chunk_work {
30 struct work_struct work; 30 struct work_struct work;
31 atomic_t refcount; 31 refcount_t refcount;
32 struct dmz_target *target; 32 struct dmz_target *target;
33 unsigned int chunk; 33 unsigned int chunk;
34 struct bio_list bio_list; 34 struct bio_list bio_list;
@@ -115,7 +115,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
115 if (nr_blocks == dmz_bio_blocks(bio)) { 115 if (nr_blocks == dmz_bio_blocks(bio)) {
116 /* Setup and submit the BIO */ 116 /* Setup and submit the BIO */
117 bio->bi_iter.bi_sector = sector; 117 bio->bi_iter.bi_sector = sector;
118 atomic_inc(&bioctx->ref); 118 refcount_inc(&bioctx->ref);
119 generic_make_request(bio); 119 generic_make_request(bio);
120 return 0; 120 return 0;
121 } 121 }
@@ -134,7 +134,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
134 bio_advance(bio, clone->bi_iter.bi_size); 134 bio_advance(bio, clone->bi_iter.bi_size);
135 135
136 /* Submit the clone */ 136 /* Submit the clone */
137 atomic_inc(&bioctx->ref); 137 refcount_inc(&bioctx->ref);
138 generic_make_request(clone); 138 generic_make_request(clone);
139 139
140 return 0; 140 return 0;
@@ -240,7 +240,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
240 /* Setup and submit the BIO */ 240 /* Setup and submit the BIO */
241 bio_set_dev(bio, dmz->dev->bdev); 241 bio_set_dev(bio, dmz->dev->bdev);
242 bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); 242 bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
243 atomic_inc(&bioctx->ref); 243 refcount_inc(&bioctx->ref);
244 generic_make_request(bio); 244 generic_make_request(bio);
245 245
246 if (dmz_is_seq(zone)) 246 if (dmz_is_seq(zone))
@@ -456,7 +456,7 @@ out:
456 */ 456 */
457static inline void dmz_get_chunk_work(struct dm_chunk_work *cw) 457static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
458{ 458{
459 atomic_inc(&cw->refcount); 459 refcount_inc(&cw->refcount);
460} 460}
461 461
462/* 462/*
@@ -465,7 +465,7 @@ static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
465 */ 465 */
466static void dmz_put_chunk_work(struct dm_chunk_work *cw) 466static void dmz_put_chunk_work(struct dm_chunk_work *cw)
467{ 467{
468 if (atomic_dec_and_test(&cw->refcount)) { 468 if (refcount_dec_and_test(&cw->refcount)) {
469 WARN_ON(!bio_list_empty(&cw->bio_list)); 469 WARN_ON(!bio_list_empty(&cw->bio_list));
470 radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk); 470 radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
471 kfree(cw); 471 kfree(cw);
@@ -546,7 +546,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
546 goto out; 546 goto out;
547 547
548 INIT_WORK(&cw->work, dmz_chunk_work); 548 INIT_WORK(&cw->work, dmz_chunk_work);
549 atomic_set(&cw->refcount, 0); 549 refcount_set(&cw->refcount, 0);
550 cw->target = dmz; 550 cw->target = dmz;
551 cw->chunk = chunk; 551 cw->chunk = chunk;
552 bio_list_init(&cw->bio_list); 552 bio_list_init(&cw->bio_list);
@@ -599,7 +599,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
599 bioctx->target = dmz; 599 bioctx->target = dmz;
600 bioctx->zone = NULL; 600 bioctx->zone = NULL;
601 bioctx->bio = bio; 601 bioctx->bio = bio;
602 atomic_set(&bioctx->ref, 1); 602 refcount_set(&bioctx->ref, 1);
603 bioctx->status = BLK_STS_OK; 603 bioctx->status = BLK_STS_OK;
604 604
605 /* Set the BIO pending in the flush list */ 605 /* Set the BIO pending in the flush list */
@@ -633,7 +633,7 @@ static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error
633 if (bioctx->status == BLK_STS_OK && *error) 633 if (bioctx->status == BLK_STS_OK && *error)
634 bioctx->status = *error; 634 bioctx->status = *error;
635 635
636 if (!atomic_dec_and_test(&bioctx->ref)) 636 if (!refcount_dec_and_test(&bioctx->ref))
637 return DM_ENDIO_INCOMPLETE; 637 return DM_ENDIO_INCOMPLETE;
638 638
639 /* Done */ 639 /* Done */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6be21dc210a1..c510179a7f84 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1664,7 +1664,7 @@ static blk_qc_t __process_bio(struct mapped_device *md,
1664 * Defend against IO still getting in during teardown 1664 * Defend against IO still getting in during teardown
1665 * - as was seen for a time with nvme-fcloop 1665 * - as was seen for a time with nvme-fcloop
1666 */ 1666 */
1667 if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) { 1667 if (WARN_ON_ONCE(!ti || !dm_target_is_valid(ti))) {
1668 error = -EIO; 1668 error = -EIO;
1669 goto out; 1669 goto out;
1670 } 1670 }
@@ -1806,8 +1806,6 @@ static void dm_wq_work(struct work_struct *work);
1806 1806
1807static void dm_init_normal_md_queue(struct mapped_device *md) 1807static void dm_init_normal_md_queue(struct mapped_device *md)
1808{ 1808{
1809 md->use_blk_mq = false;
1810
1811 /* 1809 /*
1812 * Initialize aspects of queue that aren't relevant for blk-mq 1810 * Initialize aspects of queue that aren't relevant for blk-mq
1813 */ 1811 */
@@ -1818,8 +1816,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
1818{ 1816{
1819 if (md->wq) 1817 if (md->wq)
1820 destroy_workqueue(md->wq); 1818 destroy_workqueue(md->wq);
1821 if (md->kworker_task)
1822 kthread_stop(md->kworker_task);
1823 bioset_exit(&md->bs); 1819 bioset_exit(&md->bs);
1824 bioset_exit(&md->io_bs); 1820 bioset_exit(&md->io_bs);
1825 1821
@@ -1886,7 +1882,6 @@ static struct mapped_device *alloc_dev(int minor)
1886 goto bad_io_barrier; 1882 goto bad_io_barrier;
1887 1883
1888 md->numa_node_id = numa_node_id; 1884 md->numa_node_id = numa_node_id;
1889 md->use_blk_mq = dm_use_blk_mq_default();
1890 md->init_tio_pdu = false; 1885 md->init_tio_pdu = false;
1891 md->type = DM_TYPE_NONE; 1886 md->type = DM_TYPE_NONE;
1892 mutex_init(&md->suspend_lock); 1887 mutex_init(&md->suspend_lock);
@@ -1917,7 +1912,6 @@ static struct mapped_device *alloc_dev(int minor)
1917 INIT_WORK(&md->work, dm_wq_work); 1912 INIT_WORK(&md->work, dm_wq_work);
1918 init_waitqueue_head(&md->eventq); 1913 init_waitqueue_head(&md->eventq);
1919 init_completion(&md->kobj_holder.completion); 1914 init_completion(&md->kobj_holder.completion);
1920 md->kworker_task = NULL;
1921 1915
1922 md->disk->major = _major; 1916 md->disk->major = _major;
1923 md->disk->first_minor = minor; 1917 md->disk->first_minor = minor;
@@ -2217,14 +2211,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2217 2211
2218 switch (type) { 2212 switch (type) {
2219 case DM_TYPE_REQUEST_BASED: 2213 case DM_TYPE_REQUEST_BASED:
2220 dm_init_normal_md_queue(md);
2221 r = dm_old_init_request_queue(md, t);
2222 if (r) {
2223 DMERR("Cannot initialize queue for request-based mapped device");
2224 return r;
2225 }
2226 break;
2227 case DM_TYPE_MQ_REQUEST_BASED:
2228 r = dm_mq_init_request_queue(md, t); 2214 r = dm_mq_init_request_queue(md, t);
2229 if (r) { 2215 if (r) {
2230 DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2216 DMERR("Cannot initialize queue for request-based dm-mq mapped device");
@@ -2329,9 +2315,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2329 2315
2330 blk_set_queue_dying(md->queue); 2316 blk_set_queue_dying(md->queue);
2331 2317
2332 if (dm_request_based(md) && md->kworker_task)
2333 kthread_flush_worker(&md->kworker);
2334
2335 /* 2318 /*
2336 * Take suspend_lock so that presuspend and postsuspend methods 2319 * Take suspend_lock so that presuspend and postsuspend methods
2337 * do not race with internal suspend. 2320 * do not race with internal suspend.
@@ -2584,11 +2567,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2584 * Stop md->queue before flushing md->wq in case request-based 2567 * Stop md->queue before flushing md->wq in case request-based
2585 * dm defers requests to md->wq from md->queue. 2568 * dm defers requests to md->wq from md->queue.
2586 */ 2569 */
2587 if (dm_request_based(md)) { 2570 if (dm_request_based(md))
2588 dm_stop_queue(md->queue); 2571 dm_stop_queue(md->queue);
2589 if (md->kworker_task)
2590 kthread_flush_worker(&md->kworker);
2591 }
2592 2572
2593 flush_workqueue(md->wq); 2573 flush_workqueue(md->wq);
2594 2574
@@ -2963,7 +2943,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
2963 goto out; 2943 goto out;
2964 break; 2944 break;
2965 case DM_TYPE_REQUEST_BASED: 2945 case DM_TYPE_REQUEST_BASED:
2966 case DM_TYPE_MQ_REQUEST_BASED:
2967 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 2946 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
2968 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2947 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2969 /* per_io_data_size is used for blk-mq pdu at queue allocation */ 2948 /* per_io_data_size is used for blk-mq pdu at queue allocation */
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 114a81b27c37..2d539b82ec08 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -70,7 +70,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
70struct dm_target *dm_table_get_wildcard_target(struct dm_table *t); 70struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
71bool dm_table_bio_based(struct dm_table *t); 71bool dm_table_bio_based(struct dm_table *t);
72bool dm_table_request_based(struct dm_table *t); 72bool dm_table_request_based(struct dm_table *t);
73bool dm_table_all_blk_mq_devices(struct dm_table *t);
74void dm_table_free_md_mempools(struct dm_table *t); 73void dm_table_free_md_mempools(struct dm_table *t);
75struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); 74struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
76 75