author	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-21 13:57:33 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-21 13:57:33 -0500
commit	772c8f6f3bbd3ceb94a89373473083e3e1113554 (patch)
tree	d2b34e8f1841a169d59adf53074de217a9e0f977 /drivers/md
parent	fd4a61e08aa79f2b7835b25c6f94f27bd2d65990 (diff)
parent	818551e2b2c662a1b26de6b4f7d6b8411a838d18 (diff)
Merge tag 'for-4.11/linus-merge-signed' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:

 - blk-mq scheduling framework from me and Omar, with a port of the deadline scheduler for this framework. A port of BFQ from Paolo is in the works, and should be ready for 4.12.
 - Various fixups and improvements to the above scheduling framework from Omar, Paolo, Bart, me, others.
 - Cleanup of the exported sysfs blk-mq data into debugfs, from Omar. This allows us to export more information that helps debug hangs or performance issues, without cluttering or abusing the sysfs API.
 - Fixes for the sbitmap code, the scalable bitmap code that was migrated from blk-mq, from Omar.
 - Removal of the BLOCK_PC support in struct request, and refactoring of carrying SCSI payloads in the block layer. This cleans up the code nicely, and enables us to kill the SCSI specific parts of struct request, shrinking it down nicely. From Christoph mainly, with help from Hannes.
 - Support for ranged discard requests and discard merging, also from Christoph.
 - Support for OPAL in the block layer, and for NVMe as well. Mainly from Scott Bauer, with fixes/updates from various other folks.
 - Error code fixup for gdrom from Christophe.
 - cciss pci irq allocation cleanup from Christoph.
 - Making the cdrom device operations read only, from Kees Cook.
 - Fixes for duplicate bdi registrations and bdi/queue lifetime problems from Jan and Dan.
 - Set of fixes and updates for lightnvm, from Matias and Javier.
 - A few fixes for nbd from Josef, using idr to name devices and a workqueue deadlock fix on receive. Also marks Josef as the current maintainer of nbd.
 - Fix from Josef, overwriting queue settings when the number of hardware queues is updated for a blk-mq device.
 - NVMe fix from Keith, ensuring that we don't repeatedly mark an IO aborted, if we didn't end up aborting it.
 - SG gap merging fix from Ming Lei for block.
 - Loop fix also from Ming, fixing a race and crash between setting loop status and IO.
 - Two block race fixes from Tahsin, fixing request list iteration and fixing a race between device registration and udev device add notifications.
 - Double free fix in cgroup writeback, from Tejun.
 - Another double free fix in blkcg, from Hou Tao.
 - Partition overflow fix for EFI from Alden Tondettar.

* tag 'for-4.11/linus-merge-signed' of git://git.kernel.dk/linux-block: (156 commits)
  nvme: Check for Security send/recv support before issuing commands.
  block/sed-opal: allocate struct opal_dev dynamically
  block/sed-opal: tone down not supported warnings
  block: don't defer flushes on blk-mq + scheduling
  blk-mq-sched: ask scheduler for work, if we failed dispatching leftovers
  blk-mq: don't special case flush inserts for blk-mq-sched
  blk-mq-sched: don't add flushes to the head of requeue queue
  blk-mq: have blk_mq_dispatch_rq_list() return if we queued IO or not
  block: do not allow updates through sysfs until registration completes
  lightnvm: set default lun range when no luns are specified
  lightnvm: fix off-by-one error on target initialization
  Maintainers: Modify SED list from nvme to block
  Move stack parameters for sed_ioctl to prevent oversized stack with CONFIG_KASAN
  uapi: sed-opal fix IOW for activate lsp to use correct struct
  cdrom: Make device operations read-only
  elevator: fix loading wrong elevator type for blk-mq devices
  cciss: switch to pci_irq_alloc_vectors
  block/loop: fix race between I/O and set_status
  blk-mq-sched: don't hold queue_lock when calling exit_icq
  block: set make_request_fn manually in blk_mq_update_nr_hw_queues
  ...
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/request.c   |  12
-rw-r--r--  drivers/md/bcache/super.c     |   8
-rw-r--r--  drivers/md/dm-cache-target.c  |  15
-rw-r--r--  drivers/md/dm-core.h          |   1
-rw-r--r--  drivers/md/dm-era-target.c    |   2
-rw-r--r--  drivers/md/dm-mpath.c         | 132
-rw-r--r--  drivers/md/dm-rq.c            | 268
-rw-r--r--  drivers/md/dm-rq.h            |   2
-rw-r--r--  drivers/md/dm-table.c         |   2
-rw-r--r--  drivers/md/dm-target.c        |   7
-rw-r--r--  drivers/md/dm-thin.c          |  15
-rw-r--r--  drivers/md/dm.c               |  49
-rw-r--r--  drivers/md/dm.h               |   3
-rw-r--r--  drivers/md/linear.c           |   2
-rw-r--r--  drivers/md/md.c               |   6
-rw-r--r--  drivers/md/multipath.c        |   2
-rw-r--r--  drivers/md/raid0.c            |   6
-rw-r--r--  drivers/md/raid1.c            |  11
-rw-r--r--  drivers/md/raid10.c           |  10
-rw-r--r--  drivers/md/raid5.c            |  12
20 files changed, 142 insertions, 423 deletions
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 76d20875503c..709c9cc34369 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -666,7 +666,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.write_prio = 0;
 	s->iop.error = 0;
 	s->iop.flags = 0;
-	s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
+	s->iop.flush_journal = op_is_flush(bio->bi_opf);
 	s->iop.wq = bcache_wq;
 
 	return s;
@@ -1009,7 +1009,7 @@ static int cached_dev_congested(void *data, int bits)
 	struct request_queue *q = bdev_get_queue(dc->bdev);
 	int ret = 0;
 
-	if (bdi_congested(&q->backing_dev_info, bits))
+	if (bdi_congested(q->backing_dev_info, bits))
 		return 1;
 
 	if (cached_dev_get(dc)) {
@@ -1018,7 +1018,7 @@ static int cached_dev_congested(void *data, int bits)
 
 		for_each_cache(ca, d->c, i) {
 			q = bdev_get_queue(ca->bdev);
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 		}
 
 		cached_dev_put(dc);
@@ -1032,7 +1032,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 	struct gendisk *g = dc->disk.disk;
 
 	g->queue->make_request_fn = cached_dev_make_request;
-	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
 	dc->disk.cache_miss = cached_dev_cache_miss;
 	dc->disk.ioctl = cached_dev_ioctl;
 }
@@ -1125,7 +1125,7 @@ static int flash_dev_congested(void *data, int bits)
 
 	for_each_cache(ca, d->c, i) {
 		q = bdev_get_queue(ca->bdev);
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 
 	return ret;
@@ -1136,7 +1136,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
 	struct gendisk *g = d->disk;
 
 	g->queue->make_request_fn = flash_dev_make_request;
-	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
 	d->cache_miss = flash_dev_cache_miss;
 	d->ioctl = flash_dev_ioctl;
 }
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3a19cbc8b230..85e3f21c2514 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -807,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	blk_queue_make_request(q, NULL);
 	d->disk->queue = q;
 	q->queuedata = d;
-	q->backing_dev_info.congested_data = d;
+	q->backing_dev_info->congested_data = d;
 	q->limits.max_hw_sectors = UINT_MAX;
 	q->limits.max_sectors = UINT_MAX;
 	q->limits.max_segment_size = UINT_MAX;
@@ -1132,9 +1132,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
 	set_capacity(dc->disk.disk,
 		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
 
-	dc->disk.disk->queue->backing_dev_info.ra_pages =
-		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
-		    q->backing_dev_info.ra_pages);
+	dc->disk.disk->queue->backing_dev_info->ra_pages =
+		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+		    q->backing_dev_info->ra_pages);
 
 	bch_cached_dev_request_init(dc);
 	bch_cached_dev_writeback_init(dc);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index e04c61e0839e..894bc14469c8 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -787,8 +787,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	spin_lock_irqsave(&cache->lock, flags);
-	if (cache->need_tick_bio &&
-	    !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
+	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
 		pb->tick = true;
 		cache->need_tick_bio = false;
@@ -828,11 +827,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 	return to_oblock(block_nr);
 }
 
-static int bio_triggers_commit(struct cache *cache, struct bio *bio)
-{
-	return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
-}
-
 /*
  * You must increment the deferred set whilst the prison cell is held. To
  * encourage this, we ask for 'cell' to be passed in.
@@ -884,7 +878,7 @@ static void issue(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
 
-	if (!bio_triggers_commit(cache, bio)) {
+	if (!op_is_flush(bio->bi_opf)) {
 		accounted_request(cache, bio);
 		return;
 	}
@@ -1069,8 +1063,7 @@ static void dec_io_migrations(struct cache *cache)
 
 static bool discard_or_flush(struct bio *bio)
 {
-	return bio_op(bio) == REQ_OP_DISCARD ||
-	       bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -2291,7 +2284,7 @@ static void do_waker(struct work_struct *ws)
 static int is_congested(struct dm_dev *dev, int bdi_bits)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 40ceba1fe8be..136fda3ff9e5 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -92,7 +92,6 @@ struct mapped_device {
 	 * io objects are allocated from here.
 	 */
 	mempool_t *io_pool;
-	mempool_t *rq_pool;
 
 	struct bio_set *bs;
 
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index bf2b2676cb8a..9fab33b113c4 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
 static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3570bcb7a4a4..7f223dbed49f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -92,12 +92,6 @@ struct multipath {
 
 	unsigned queue_mode;
 
-	/*
-	 * We must use a mempool of dm_mpath_io structs so that we
-	 * can resubmit bios on error.
-	 */
-	mempool_t *mpio_pool;
-
 	struct mutex work_mutex;
 	struct work_struct trigger_event;
 
@@ -115,8 +109,6 @@ struct dm_mpath_io {
 
 typedef int (*action_fn) (struct pgpath *pgpath);
 
-static struct kmem_cache *_mpio_cache;
-
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
@@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		init_waitqueue_head(&m->pg_init_wait);
 		mutex_init(&m->work_mutex);
 
-		m->mpio_pool = NULL;
 		m->queue_mode = DM_TYPE_NONE;
 
 		m->ti = ti;
@@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
 		else
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
-	}
-
-	if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
-		unsigned min_ios = dm_get_reserved_rq_based_ios();
-
-		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
-		if (!m->mpio_pool)
-			return -ENOMEM;
-	}
-	else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
 		/*
 		 * bio-based doesn't support any direct scsi_dh management;
@@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m)
 
 	kfree(m->hw_handler_name);
 	kfree(m->hw_handler_params);
-	mempool_destroy(m->mpio_pool);
 	kfree(m);
 }
 
@@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info)
 	return info->ptr;
 }
 
-static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
-{
-	struct dm_mpath_io *mpio;
-
-	if (!m->mpio_pool) {
-		/* Use blk-mq pdu memory requested via per_io_data_size */
-		mpio = get_mpio(info);
-		memset(mpio, 0, sizeof(*mpio));
-		return mpio;
-	}
-
-	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
-	if (!mpio)
-		return NULL;
-
-	memset(mpio, 0, sizeof(*mpio));
-	info->ptr = mpio;
-
-	return mpio;
-}
-
-static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
-{
-	/* Only needed for non blk-mq (.request_fn) multipath */
-	if (m->mpio_pool) {
-		struct dm_mpath_io *mpio = info->ptr;
-
-		info->ptr = NULL;
-		mempool_free(mpio, m->mpio_pool);
-	}
-}
-
 static size_t multipath_per_bio_data_size(void)
 {
 	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
@@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m)
 /*
  * Map cloned requests (request-based multipath)
  */
-static int __multipath_map(struct dm_target *ti, struct request *clone,
-			   union map_info *map_context,
-			   struct request *rq, struct request **__clone)
+static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
+				   union map_info *map_context,
+				   struct request **__clone)
 {
 	struct multipath *m = ti->private;
 	int r = DM_MAPIO_REQUEUE;
-	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
+	size_t nr_bytes = blk_rq_bytes(rq);
 	struct pgpath *pgpath;
 	struct block_device *bdev;
-	struct dm_mpath_io *mpio;
+	struct dm_mpath_io *mpio = get_mpio(map_context);
+	struct request *clone;
 
 	/* Do we need to select a new pgpath? */
 	pgpath = lockless_dereference(m->current_pgpath);
@@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 		return r;
 	}
 
-	mpio = set_mpio(m, map_context);
-	if (!mpio)
-		/* ENOMEM, requeue */
-		return r;
-
+	memset(mpio, 0, sizeof(*mpio));
 	mpio->pgpath = pgpath;
 	mpio->nr_bytes = nr_bytes;
 
 	bdev = pgpath->path.dev->bdev;
 
-	if (clone) {
-		/*
-		 * Old request-based interface: allocated clone is passed in.
-		 * Used by: .request_fn stacked on .request_fn path(s).
-		 */
-		clone->q = bdev_get_queue(bdev);
-		clone->rq_disk = bdev->bd_disk;
-		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-	} else {
-		/*
-		 * blk-mq request-based interface; used by both:
-		 * .request_fn stacked on blk-mq path(s) and
-		 * blk-mq stacked on blk-mq path(s).
-		 */
-		clone = blk_mq_alloc_request(bdev_get_queue(bdev),
-					     rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
-		if (IS_ERR(clone)) {
-			/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
-			clear_request_fn_mpio(m, map_context);
-			return r;
-		}
-		clone->bio = clone->biotail = NULL;
-		clone->rq_disk = bdev->bd_disk;
-		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-		*__clone = clone;
+	clone = blk_get_request(bdev_get_queue(bdev),
+			rq->cmd_flags | REQ_NOMERGE,
+			GFP_ATOMIC);
+	if (IS_ERR(clone)) {
+		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
+		return r;
 	}
+	clone->bio = clone->biotail = NULL;
+	clone->rq_disk = bdev->bd_disk;
+	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	*__clone = clone;
 
 	if (pgpath->pg->ps.type->start_io)
 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
@@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 	return DM_MAPIO_REMAPPED;
 }
 
-static int multipath_map(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
-{
-	return __multipath_map(ti, clone, map_context, NULL, NULL);
-}
-
-static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
-				   union map_info *map_context,
-				   struct request **clone)
-{
-	return __multipath_map(ti, NULL, map_context, rq, clone);
-}
-
 static void multipath_release_clone(struct request *clone)
 {
-	blk_mq_free_request(clone);
+	blk_put_request(clone);
 }
 
 /*
@@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_write_same_bios = 1;
 	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
-	else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+	else
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
 
 	return 0;
@@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 		if (ps->type->end_io)
 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
 	}
-	clear_request_fn_mpio(m, map_context);
 
 	return r;
 }
@@ -2060,7 +1977,6 @@ static struct target_type multipath_target = {
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
-	.map_rq = multipath_map,
 	.clone_and_map_rq = multipath_clone_and_map,
 	.release_clone_rq = multipath_release_clone,
 	.rq_end_io = multipath_end_io,
@@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void)
 {
 	int r;
 
-	/* allocate a slab for the dm_mpath_ios */
-	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
-	if (!_mpio_cache)
-		return -ENOMEM;
-
 	r = dm_register_target(&multipath_target);
 	if (r < 0) {
 		DMERR("request-based register failed %d", r);
@@ -2120,8 +2031,6 @@ bad_alloc_kmpath_handlerd:
bad_alloc_kmultipathd:
 	dm_unregister_target(&multipath_target);
bad_register_target:
-	kmem_cache_destroy(_mpio_cache);
-
 	return r;
 }
 
@@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void)
 	destroy_workqueue(kmultipathd);
 
 	dm_unregister_target(&multipath_target);
-	kmem_cache_destroy(_mpio_cache);
 }
 
 module_init(dm_multipath_init);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e702fc69a83..67d76f21fecd 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q)
 		dm_mq_stop_queue(q);
 }
 
-static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
-						gfp_t gfp_mask)
-{
-	return mempool_alloc(md->io_pool, gfp_mask);
-}
-
-static void free_old_rq_tio(struct dm_rq_target_io *tio)
-{
-	mempool_free(tio, tio->md->io_pool);
-}
-
-static struct request *alloc_old_clone_request(struct mapped_device *md,
-					       gfp_t gfp_mask)
-{
-	return mempool_alloc(md->rq_pool, gfp_mask);
-}
-
-static void free_old_clone_request(struct mapped_device *md, struct request *rq)
-{
-	mempool_free(rq, md->rq_pool);
-}
-
 /*
  * Partial completion handling for request-based dm
  */
@@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone)
 
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
 {
-	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
+	return blk_mq_rq_to_pdu(rq);
 }
 
 static void rq_end_stats(struct mapped_device *md, struct request *orig)
@@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
-{
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct mapped_device *md = tio->md;
-
-	blk_rq_unprep_clone(clone);
-
-	/*
-	 * It is possible for a clone_old_rq() allocated clone to
-	 * get passed in -- it may not yet have a request_queue.
-	 * This is known to occur if the error target replaces
-	 * a multipath target that has a request_fn queue stacked
-	 * on blk-mq queue(s).
-	 */
-	if (clone->q && clone->q->mq_ops)
-		/* stacked on blk-mq queue(s) */
-		tio->ti->type->release_clone_rq(clone);
-	else if (!md->queue->mq_ops)
-		/* request_fn queue stacked on request_fn queue(s) */
-		free_old_clone_request(md, clone);
-
-	if (!md->queue->mq_ops)
-		free_old_rq_tio(tio);
-}
-
 /*
  * Complete the clone and the original request.
  * Must be called without clone's queue lock held,
@@ -270,20 +223,9 @@ static void dm_end_request(struct request *clone, int error)
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;
 
-	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
-		rq->errors = clone->errors;
-		rq->resid_len = clone->resid_len;
-
-		if (rq->sense)
-			/*
-			 * We are using the sense buffer of the original
-			 * request.
-			 * So setting the length of the sense data is enough.
-			 */
-			rq->sense_len = clone->sense_len;
-	}
+	blk_rq_unprep_clone(clone);
+	tio->ti->type->release_clone_rq(clone);
 
-	free_rq_clone(clone);
 	rq_end_stats(md, rq);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
@@ -292,22 +234,6 @@ static void dm_end_request(struct request *clone, int error)
 	rq_completed(md, rw, true);
 }
 
-static void dm_unprep_request(struct request *rq)
-{
-	struct dm_rq_target_io *tio = tio_from_request(rq);
-	struct request *clone = tio->clone;
-
-	if (!rq->q->mq_ops) {
-		rq->special = NULL;
-		rq->rq_flags &= ~RQF_DONTPREP;
-	}
-
-	if (clone)
-		free_rq_clone(clone);
-	else if (!tio->md->queue->mq_ops)
-		free_old_rq_tio(tio);
-}
-
 /*
  * Requeue the original request of a clone.
  */
@@ -346,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
 	int rw = rq_data_dir(rq);
 
 	rq_end_stats(md, rq);
-	dm_unprep_request(rq);
+	if (tio->clone) {
+		blk_rq_unprep_clone(tio->clone);
+		tio->ti->type->release_clone_rq(tio->clone);
+	}
 
 	if (!rq->q->mq_ops)
 		dm_old_requeue_request(rq);
@@ -401,14 +330,11 @@ static void dm_softirq_done(struct request *rq)
 	if (!clone) {
 		rq_end_stats(tio->md, rq);
 		rw = rq_data_dir(rq);
-		if (!rq->q->mq_ops) {
+		if (!rq->q->mq_ops)
 			blk_end_request_all(rq, tio->error);
-			rq_completed(tio->md, rw, false);
-			free_old_rq_tio(tio);
-		} else {
+		else
 			blk_mq_end_request(rq, tio->error);
-			rq_completed(tio->md, rw, false);
-		}
+		rq_completed(tio->md, rw, false);
 		return;
 	}
 
@@ -452,16 +378,6 @@ static void end_clone_request(struct request *clone, int error)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	if (!clone->q->mq_ops) {
-		/*
-		 * For just cleaning up the information of the queue in which
-		 * the clone was dispatched.
-		 * The clone is *NOT* freed actually here because it is alloced
-		 * from dm own mempool (RQF_ALLOCED isn't set).
-		 */
-		__blk_put_request(clone->q, clone);
-	}
-
 	/*
 	 * Actual request completion is done in a softirq context which doesn't
 	 * hold the clone's queue lock. Otherwise, deadlock could occur because:
@@ -511,9 +427,6 @@ static int setup_clone(struct request *clone, struct request *rq,
 	if (r)
 		return r;
 
-	clone->cmd = rq->cmd;
-	clone->cmd_len = rq->cmd_len;
-	clone->sense = rq->sense;
 	clone->end_io = end_clone_request;
 	clone->end_io_data = tio;
 
@@ -522,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq,
 	return 0;
 }
 
-static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
-				    struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
-	/*
-	 * Create clone for use with .request_fn request_queue
-	 */
-	struct request *clone;
-
-	clone = alloc_old_clone_request(md, gfp_mask);
-	if (!clone)
-		return NULL;
-
-	blk_rq_init(NULL, clone);
-	if (setup_clone(clone, rq, tio, gfp_mask)) {
-		/* -ENOMEM */
-		free_old_clone_request(md, clone);
-		return NULL;
-	}
-
-	return clone;
-}
-
 static void map_tio_request(struct kthread_work *work);
 
 static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
@@ -565,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
 	kthread_init_work(&tio->work, map_tio_request);
 }
 
-static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
-					       struct mapped_device *md,
-					       gfp_t gfp_mask)
-{
-	struct dm_rq_target_io *tio;
-	int srcu_idx;
-	struct dm_table *table;
-
-	tio = alloc_old_rq_tio(md, gfp_mask);
-	if (!tio)
-		return NULL;
-
-	init_tio(tio, rq, md);
-
-	table = dm_get_live_table(md, &srcu_idx);
-	/*
-	 * Must clone a request if this .request_fn DM device
-	 * is stacked on .request_fn device(s).
-	 */
-	if (!dm_table_all_blk_mq_devices(table)) {
-		if (!clone_old_rq(rq, md, tio, gfp_mask)) {
-			dm_put_live_table(md, srcu_idx);
-			free_old_rq_tio(tio);
-			return NULL;
-		}
-	}
-	dm_put_live_table(md, srcu_idx);
-
-	return tio;
-}
-
-/*
- * Called with the queue lock held.
- */
-static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_rq_target_io *tio;
-
-	if (unlikely(rq->special)) {
-		DMWARN("Already has something in rq->special.");
-		return BLKPREP_KILL;
-	}
-
-	tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
-	if (!tio)
-		return BLKPREP_DEFER;
-
-	rq->special = tio;
-	rq->rq_flags |= RQF_DONTPREP;
-
-	return BLKPREP_OK;
-}
-
 /*
  * Returns:
  * DM_MAPIO_* : the request has been processed as indicated
@@ -633,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio)
 	struct request *rq = tio->orig;
 	struct request *clone = NULL;
 
-	if (tio->clone) {
-		clone = tio->clone;
-		r = ti->type->map_rq(ti, clone, &tio->info);
-		if (r == DM_MAPIO_DELAY_REQUEUE)
-			return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
-	} else {
-		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
-		if (r < 0) {
-			/* The target wants to complete the I/O */
-			dm_kill_unmapped_request(rq, r);
-			return r;
-		}
-		if (r == DM_MAPIO_REMAPPED &&
-		    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
-			/* -ENOMEM */
-			ti->type->release_clone_rq(clone);
-			return DM_MAPIO_REQUEUE;
-		}
-	}
-
+	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
 		/* The target has taken the I/O to submit by itself later */
 		break;
 	case DM_MAPIO_REMAPPED:
+		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+			/* -ENOMEM */
+			ti->type->release_clone_rq(clone);
+			return DM_MAPIO_REQUEUE;
+		}
+
 		/* The target has remapped the I/O so dispatch it */
 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
 				     blk_rq_pos(rq));
@@ -716,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	dm_get(md);
 }
 
+static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
+{
+	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+	/*
+	 * Must initialize md member of tio, otherwise it won't
+	 * be available in dm_mq_queue_rq.
+	 */
+	tio->md = md;
+
+	if (md->init_tio_pdu) {
+		/* target-specific per-io data is immediately after the tio */
+		tio->info.ptr = tio + 1;
+	}
+
+	return 0;
+}
+
+static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+{
+	return __dm_rq_init_rq(q->rq_alloc_data, rq);
+}
+
 static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
@@ -814,6 +661,7 @@ static void dm_old_request_fn(struct request_queue *q)
 		dm_start_request(md, rq);
 
 		tio = tio_from_request(rq);
+		init_tio(tio, rq, md);
 		/* Establish tio->ti before queuing work (map_tio_request) */
 		tio->ti = ti;
 		kthread_queue_work(&md->kworker, &tio->work);
@@ -824,10 +672,23 @@
 /*
  * Fully initialize a .request_fn request-based queue.
  */
-int dm_old_init_request_queue(struct mapped_device *md)
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 {
+	struct dm_target *immutable_tgt;
+
 	/* Fully initialize the queue */
-	if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
+	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
+	md->queue->rq_alloc_data = md;
+	md->queue->request_fn = dm_old_request_fn;
+	md->queue->init_rq_fn = dm_rq_init_rq;
+
+	immutable_tgt = dm_table_get_immutable_target(t);
+	if (immutable_tgt && immutable_tgt->per_io_data_size) {
+		/* any target-specific per-io data is immediately after the tio */
+		md->queue->cmd_size += immutable_tgt->per_io_data_size;
+		md->init_tio_pdu = true;
+	}
+	if (blk_init_allocated_queue(md->queue) < 0)
 		return -EINVAL;
 
 	/* disable dm_old_request_fn's merge heuristic by default */
@@ -835,7 +696,6 @@ int dm_old_init_request_queue(struct mapped_device *md)
 
 	dm_init_normal_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
-	blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
 	/* Initialize the request-based DM worker thread */
 	kthread_init_worker(&md->kworker);
@@ -856,21 +716,7 @@ static int dm_mq_init_request(void *data, struct request *rq,
 			      unsigned int hctx_idx, unsigned int request_idx,
 			      unsigned int numa_node)
 {
-	struct mapped_device *md = data;
-	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
-
-	/*
-	 * Must initialize md member of tio, otherwise it won't
-	 * be available in dm_mq_queue_rq.
-	 */
-	tio->md = md;
-
-	if (md->init_tio_pdu) {
-		/* target-specific per-io data is immediately after the tio */
-		tio->info.ptr = tio + 1;
-	}
-
-	return 0;
+	return __dm_rq_init_rq(data, rq);
 }
 
 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 4da06cae7bad..f0020d21b95f 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -48,7 +48,7 @@ struct dm_rq_clone_bio_info {
bool dm_use_blk_mq_default(void);
bool dm_use_blk_mq(struct mapped_device *md);
 
-int dm_old_init_request_queue(struct mapped_device *md);
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
void dm_mq_cleanup_mapped_device(struct mapped_device *md);
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0a427de23ed2..3ad16d9c9d5a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1750,7 +1750,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
			char b[BDEVNAME_SIZE];
 
			if (likely(q))
-				r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+				r |= bdi_congested(q->backing_dev_info, bdi_bits);
			else
				DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
					     dm_device_name(t->md),
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 710ae28fd618..43d3445b121d 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,12 +131,6 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
 	return -EIO;
 }
 
-static int io_err_map_rq(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
-{
-	return -EIO;
-}
-
 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
 				   union map_info *map_context,
 				   struct request **clone)
@@ -161,7 +155,6 @@ static struct target_type error_target = {
 	.ctr = io_err_ctr,
 	.dtr = io_err_dtr,
 	.map = io_err_map,
-	.map_rq = io_err_map_rq,
 	.clone_and_map_rq = io_err_clone_and_map_rq,
 	.release_clone_rq = io_err_release_clone_rq,
 	.direct_access = io_err_direct_access,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index d1c05c12a9db..2b266a2b5035 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
-	return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
+	return op_is_flush(bio->bi_opf) &&
		dm_thin_changed_this_transaction(tc->td);
}
 
@@ -870,8 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
	struct bio *bio;
 
	while ((bio = bio_list_pop(&cell->bios))) {
-		if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
-		    bio_op(bio) == REQ_OP_DISCARD)
+		if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);
@@ -1716,9 +1715,8 @@ static void __remap_and_issue_shared_cell(void *context,
	struct bio *bio;
 
	while ((bio = bio_list_pop(&cell->bios))) {
-		if ((bio_data_dir(bio) == WRITE) ||
-		    (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
-		     bio_op(bio) == REQ_OP_DISCARD))
+		if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
+		    bio_op(bio) == REQ_OP_DISCARD)
			bio_list_add(&info->defer_bios, bio);
		else {
			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
@@ -2635,8 +2633,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
		return DM_MAPIO_SUBMITTED;
	}
 
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
-	    bio_op(bio) == REQ_OP_DISCARD) {
+	if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
		thin_defer_bio_with_throttle(tc, bio);
		return DM_MAPIO_SUBMITTED;
	}
@@ -2714,7 +2711,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
		return 1;
 
	q = bdev_get_queue(pt->data_dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
}
 
static void requeue_bios(struct pool *pool)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3086da5664f3..5bd9ab06a562 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -91,7 +91,6 @@ static int dm_numa_node = DM_NUMA_NODE;
 */
struct dm_md_mempools {
	mempool_t *io_pool;
-	mempool_t *rq_pool;
	struct bio_set *bs;
};
 
@@ -466,13 +465,16 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 
	if (r > 0) {
		/*
-		 * Target determined this ioctl is being issued against
-		 * a logical partition of the parent bdev; so extra
-		 * validation is needed.
+		 * Target determined this ioctl is being issued against a
+		 * subset of the parent bdev; require extra privileges.
		 */
-		r = scsi_verify_blk_ioctl(NULL, cmd);
-		if (r)
+		if (!capable(CAP_SYS_RAWIO)) {
+			DMWARN_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
+				current->comm, cmd);
+			r = -ENOIOCTLCMD;
			goto out;
+		}
	}
 
	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
@@ -1314,7 +1316,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
		 * With request-based DM we only need to check the
		 * top-level queue for congestion.
		 */
-		r = md->queue->backing_dev_info.wb.state & bdi_bits;
+		r = md->queue->backing_dev_info->wb.state & bdi_bits;
	} else {
		map = dm_get_live_table_fast(md);
		if (map)
@@ -1397,7 +1399,7 @@ void dm_init_md_queue(struct mapped_device *md)
	 * - must do so here (in alloc_dev callchain) before queue is used
	 */
	md->queue->queuedata = md;
-	md->queue->backing_dev_info.congested_data = md;
+	md->queue->backing_dev_info->congested_data = md;
}
 
void dm_init_normal_md_queue(struct mapped_device *md)
@@ -1408,7 +1410,7 @@ void dm_init_normal_md_queue(struct mapped_device *md)
	/*
	 * Initialize aspects of queue that aren't relevant for blk-mq
	 */
-	md->queue->backing_dev_info.congested_fn = dm_any_congested;
+	md->queue->backing_dev_info->congested_fn = dm_any_congested;
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
 
@@ -1419,7 +1421,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
	if (md->kworker_task)
		kthread_stop(md->kworker_task);
	mempool_destroy(md->io_pool);
-	mempool_destroy(md->rq_pool);
	if (md->bs)
		bioset_free(md->bs);
 
@@ -1595,12 +1596,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
		goto out;
	}
 
-	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
+	BUG_ON(!p || md->io_pool || md->bs);
 
	md->io_pool = p->io_pool;
	p->io_pool = NULL;
-	md->rq_pool = p->rq_pool;
-	p->rq_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;
 
@@ -1777,7 +1776,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 
	switch (type) {
	case DM_TYPE_REQUEST_BASED:
-		r = dm_old_init_request_queue(md);
+		r = dm_old_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based mapped device");
			return r;
@@ -2493,7 +2492,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
			unsigned integrity, unsigned per_io_data_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
-	struct kmem_cache *cachep = NULL;
	unsigned int pool_size = 0;
	unsigned int front_pad;
 
@@ -2503,20 +2501,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
-		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+
+		pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
+		if (!pools->io_pool)
+			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
-		cachep = _rq_tio_cache;
-		pool_size = dm_get_reserved_rq_based_ios();
-		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-		if (!pools->rq_pool)
-			goto out;
-		/* fall through to setup remaining rq-based pools */
	case DM_TYPE_MQ_REQUEST_BASED:
-		if (!pool_size)
-			pool_size = dm_get_reserved_rq_based_ios();
+		pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
@@ -2524,12 +2518,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
		BUG();
	}
 
-	if (cachep) {
-		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
-		if (!pools->io_pool)
-			goto out;
-	}
-
	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
		goto out;
@@ -2551,7 +2539,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
		return;
 
	mempool_destroy(pools->io_pool);
-	mempool_destroy(pools->rq_pool);
 
	if (pools->bs)
		bioset_free(pools->bs);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f0aad08b9654..f298b01f7ab3 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -95,8 +95,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/*
 * To check whether the target type is request-based or not (bio-based).
 */
-#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
-				     ((t)->type->clone_and_map_rq != NULL))
+#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)
 
/*
 * To check whether the target type is a hybrid (capable of being
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5975c9915684..f1c7bbac31a5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -62,7 +62,7 @@ static int linear_congested(struct mddev *mddev, int bits)
 
	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
	}
 
	return ret;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 01175dac0db6..ba485dcf1064 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5346,8 +5346,8 @@ int md_run(struct mddev *mddev)
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
		else
			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
-		mddev->queue->backing_dev_info.congested_data = mddev;
-		mddev->queue->backing_dev_info.congested_fn = md_congested;
+		mddev->queue->backing_dev_info->congested_data = mddev;
+		mddev->queue->backing_dev_info->congested_fn = md_congested;
	}
	if (pers->sync_request) {
		if (mddev->kobj.sd &&
@@ -5704,7 +5704,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
 
	__md_stop_writes(mddev);
	__md_stop(mddev);
-	mddev->queue->backing_dev_info.congested_fn = NULL;
+	mddev->queue->backing_dev_info->congested_fn = NULL;
 
	/* tell userspace to handle 'inactive' */
	sysfs_notify_dirent_safe(mddev->sysfs_state);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index aa8c4e5c1ee2..d457afa672d5 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -169,7 +169,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
			/* Just like multipath_map, we just check the
			 * first available device
			 */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 848365d474f3..d6585239bff2 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -41,7 +41,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
	}
	return ret;
}
@@ -420,8 +420,8 @@ static int raid0_run(struct mddev *mddev)
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
-		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2* stripe;
	}
 
	dump_zones(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7b0f647bcccb..830ff2b20346 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -744,9 +744,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1 << WB_async_congested)) || 1)
-				ret |= bdi_congested(&q->backing_dev_info, bits);
+				ret |= bdi_congested(q->backing_dev_info, bits);
			else
-				ret &= bdi_congested(&q->backing_dev_info, bits);
+				ret &= bdi_congested(q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
@@ -1170,10 +1170,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
	int i, disks;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
-	const int op = bio_op(bio);
-	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
-	const unsigned long do_flush_fua = (bio->bi_opf &
-						(REQ_PREFLUSH | REQ_FUA));
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
@@ -1389,7 +1385,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
			 conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
-		bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+		mbio->bi_opf = bio_op(bio) |
+			(bio->bi_opf & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA));
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
		    conf->raid_disks - mddev->degraded > 1)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1920756828df..6bc5c2a85160 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -860,7 +860,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
@@ -3841,8 +3841,8 @@ static int raid10_run(struct mddev *mddev)
		 * maybe...
		 */
		stripe /= conf->geo.near_copies;
-		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}
 
	if (md_integrity_register(mddev))
@@ -4643,8 +4643,8 @@ static void end_reshape(struct r10conf *conf)
		int stripe = conf->geo.raid_disks *
			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
		stripe /= conf->geo.near_copies;
-		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}
	conf->fullsync = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3c7e106c12a2..6214e699342c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6331,10 +6331,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
		mddev_suspend(mddev);
		conf->skip_copy = new;
		if (new)
-			mddev->queue->backing_dev_info.capabilities |=
+			mddev->queue->backing_dev_info->capabilities |=
				BDI_CAP_STABLE_WRITES;
		else
-			mddev->queue->backing_dev_info.capabilities &=
+			mddev->queue->backing_dev_info->capabilities &=
				~BDI_CAP_STABLE_WRITES;
		mddev_resume(mddev);
	}
@@ -7153,8 +7153,8 @@ static int raid5_run(struct mddev *mddev)
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
-		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 
		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
@@ -7763,8 +7763,8 @@ static void end_reshape(struct r5conf *conf)
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
-			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
		}
	}
}