 drivers/md/dm-core.h          |   1
 drivers/md/dm-mpath.c         | 132
 drivers/md/dm-rq.c            | 251
 drivers/md/dm-rq.h            |   2
 drivers/md/dm-target.c        |   7
 drivers/md/dm.c               |  30
 drivers/md/dm.h               |   3
 include/linux/device-mapper.h |   3
 8 files changed, 85 insertions(+), 344 deletions(-)
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 40ceba1fe8be..136fda3ff9e5 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -92,7 +92,6 @@ struct mapped_device {
 	 * io objects are allocated from here.
 	 */
 	mempool_t *io_pool;
-	mempool_t *rq_pool;
 
 	struct bio_set *bs;
 
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6400cffb986d..784f2374c3a4 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -92,12 +92,6 @@ struct multipath {
 
 	unsigned queue_mode;
 
-	/*
-	 * We must use a mempool of dm_mpath_io structs so that we
-	 * can resubmit bios on error.
-	 */
-	mempool_t *mpio_pool;
-
 	struct mutex work_mutex;
 	struct work_struct trigger_event;
 
@@ -115,8 +109,6 @@ struct dm_mpath_io {
 
 typedef int (*action_fn) (struct pgpath *pgpath);
 
-static struct kmem_cache *_mpio_cache;
-
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
@@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		init_waitqueue_head(&m->pg_init_wait);
 		mutex_init(&m->work_mutex);
 
-		m->mpio_pool = NULL;
 		m->queue_mode = DM_TYPE_NONE;
 
 		m->ti = ti;
@@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
 		else
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
-	}
-
-	if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
-		unsigned min_ios = dm_get_reserved_rq_based_ios();
-
-		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
-		if (!m->mpio_pool)
-			return -ENOMEM;
-	}
-	else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
 		/*
 		 * bio-based doesn't support any direct scsi_dh management;
@@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m)
 
 	kfree(m->hw_handler_name);
 	kfree(m->hw_handler_params);
-	mempool_destroy(m->mpio_pool);
 	kfree(m);
 }
 
@@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info)
 	return info->ptr;
 }
 
-static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
-{
-	struct dm_mpath_io *mpio;
-
-	if (!m->mpio_pool) {
-		/* Use blk-mq pdu memory requested via per_io_data_size */
-		mpio = get_mpio(info);
-		memset(mpio, 0, sizeof(*mpio));
-		return mpio;
-	}
-
-	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
-	if (!mpio)
-		return NULL;
-
-	memset(mpio, 0, sizeof(*mpio));
-	info->ptr = mpio;
-
-	return mpio;
-}
-
-static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
-{
-	/* Only needed for non blk-mq (.request_fn) multipath */
-	if (m->mpio_pool) {
-		struct dm_mpath_io *mpio = info->ptr;
-
-		info->ptr = NULL;
-		mempool_free(mpio, m->mpio_pool);
-	}
-}
-
 static size_t multipath_per_bio_data_size(void)
 {
 	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
@@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m)
 /*
  * Map cloned requests (request-based multipath)
  */
-static int __multipath_map(struct dm_target *ti, struct request *clone,
-			   union map_info *map_context,
-			   struct request *rq, struct request **__clone)
+static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
+				   union map_info *map_context,
+				   struct request **__clone)
 {
 	struct multipath *m = ti->private;
 	int r = DM_MAPIO_REQUEUE;
-	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
+	size_t nr_bytes = blk_rq_bytes(rq);
 	struct pgpath *pgpath;
 	struct block_device *bdev;
-	struct dm_mpath_io *mpio;
+	struct dm_mpath_io *mpio = get_mpio(map_context);
+	struct request *clone;
 
 	/* Do we need to select a new pgpath? */
 	pgpath = lockless_dereference(m->current_pgpath);
@@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 		return r;
 	}
 
-	mpio = set_mpio(m, map_context);
-	if (!mpio)
-		/* ENOMEM, requeue */
-		return r;
-
+	memset(mpio, 0, sizeof(*mpio));
 	mpio->pgpath = pgpath;
 	mpio->nr_bytes = nr_bytes;
 
 	bdev = pgpath->path.dev->bdev;
 
-	if (clone) {
-		/*
-		 * Old request-based interface: allocated clone is passed in.
-		 * Used by: .request_fn stacked on .request_fn path(s).
-		 */
-		clone->q = bdev_get_queue(bdev);
-		clone->rq_disk = bdev->bd_disk;
-		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-	} else {
-		/*
-		 * blk-mq request-based interface; used by both:
-		 * .request_fn stacked on blk-mq path(s) and
-		 * blk-mq stacked on blk-mq path(s).
-		 */
-		clone = blk_mq_alloc_request(bdev_get_queue(bdev),
-					     rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
-		if (IS_ERR(clone)) {
-			/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
-			clear_request_fn_mpio(m, map_context);
-			return r;
-		}
-		clone->bio = clone->biotail = NULL;
-		clone->rq_disk = bdev->bd_disk;
-		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-		*__clone = clone;
+	clone = blk_get_request(bdev_get_queue(bdev),
+				rq->cmd_flags | REQ_NOMERGE,
+				GFP_ATOMIC);
+	if (IS_ERR(clone)) {
+		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
+		return r;
 	}
+	clone->bio = clone->biotail = NULL;
+	clone->rq_disk = bdev->bd_disk;
+	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	*__clone = clone;
 
 	if (pgpath->pg->ps.type->start_io)
 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
@@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 	return DM_MAPIO_REMAPPED;
 }
 
-static int multipath_map(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
-{
-	return __multipath_map(ti, clone, map_context, NULL, NULL);
-}
-
-static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
-				   union map_info *map_context,
-				   struct request **clone)
-{
-	return __multipath_map(ti, NULL, map_context, rq, clone);
-}
-
 static void multipath_release_clone(struct request *clone)
 {
-	blk_mq_free_request(clone);
+	blk_put_request(clone);
 }
 
 /*
@@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_write_same_bios = 1;
 	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
-	else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+	else
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
 
 	return 0;
@@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 		if (ps->type->end_io)
 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
 	}
-	clear_request_fn_mpio(m, map_context);
 
 	return r;
 }
@@ -2060,7 +1977,6 @@ static struct target_type multipath_target = {
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
-	.map_rq = multipath_map,
 	.clone_and_map_rq = multipath_clone_and_map,
 	.release_clone_rq = multipath_release_clone,
 	.rq_end_io = multipath_end_io,
@@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void)
 {
 	int r;
 
-	/* allocate a slab for the dm_mpath_ios */
-	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
-	if (!_mpio_cache)
-		return -ENOMEM;
-
 	r = dm_register_target(&multipath_target);
 	if (r < 0) {
 		DMERR("request-based register failed %d", r);
@@ -2120,8 +2031,6 @@ bad_alloc_kmpath_handlerd:
 bad_alloc_kmultipathd:
 	dm_unregister_target(&multipath_target);
 bad_register_target:
-	kmem_cache_destroy(_mpio_cache);
-
 	return r;
 }
 
@@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void)
 	destroy_workqueue(kmultipathd);
 
 	dm_unregister_target(&multipath_target);
-	kmem_cache_destroy(_mpio_cache);
 }
 
 module_init(dm_multipath_init);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 3f12916f2424..8d0683474767 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q)
 		dm_mq_stop_queue(q);
 }
 
-static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
-						gfp_t gfp_mask)
-{
-	return mempool_alloc(md->io_pool, gfp_mask);
-}
-
-static void free_old_rq_tio(struct dm_rq_target_io *tio)
-{
-	mempool_free(tio, tio->md->io_pool);
-}
-
-static struct request *alloc_old_clone_request(struct mapped_device *md,
-					       gfp_t gfp_mask)
-{
-	return mempool_alloc(md->rq_pool, gfp_mask);
-}
-
-static void free_old_clone_request(struct mapped_device *md, struct request *rq)
-{
-	mempool_free(rq, md->rq_pool);
-}
-
 /*
  * Partial completion handling for request-based dm
  */
@@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone)
 
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
 {
-	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
+	return blk_mq_rq_to_pdu(rq);
 }
 
 static void rq_end_stats(struct mapped_device *md, struct request *orig)
@@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
-{
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct mapped_device *md = tio->md;
-
-	blk_rq_unprep_clone(clone);
-
-	/*
-	 * It is possible for a clone_old_rq() allocated clone to
-	 * get passed in -- it may not yet have a request_queue.
-	 * This is known to occur if the error target replaces
-	 * a multipath target that has a request_fn queue stacked
-	 * on blk-mq queue(s).
-	 */
-	if (clone->q && clone->q->mq_ops)
-		/* stacked on blk-mq queue(s) */
-		tio->ti->type->release_clone_rq(clone);
-	else if (!md->queue->mq_ops)
-		/* request_fn queue stacked on request_fn queue(s) */
-		free_old_clone_request(md, clone);
-
-	if (!md->queue->mq_ops)
-		free_old_rq_tio(tio);
-}
-
 /*
  * Complete the clone and the original request.
  * Must be called without clone's queue lock held,
@@ -270,7 +223,9 @@ static void dm_end_request(struct request *clone, int error)
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;
 
-	free_rq_clone(clone);
+	blk_rq_unprep_clone(clone);
+	tio->ti->type->release_clone_rq(clone);
+
 	rq_end_stats(md, rq);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
@@ -279,22 +234,6 @@ static void dm_end_request(struct request *clone, int error)
 	rq_completed(md, rw, true);
 }
 
-static void dm_unprep_request(struct request *rq)
-{
-	struct dm_rq_target_io *tio = tio_from_request(rq);
-	struct request *clone = tio->clone;
-
-	if (!rq->q->mq_ops) {
-		rq->special = NULL;
-		rq->rq_flags &= ~RQF_DONTPREP;
-	}
-
-	if (clone)
-		free_rq_clone(clone);
-	else if (!tio->md->queue->mq_ops)
-		free_old_rq_tio(tio);
-}
-
 /*
  * Requeue the original request of a clone.
  */
@@ -333,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
 	int rw = rq_data_dir(rq);
 
 	rq_end_stats(md, rq);
-	dm_unprep_request(rq);
+	if (tio->clone) {
+		blk_rq_unprep_clone(tio->clone);
+		tio->ti->type->release_clone_rq(tio->clone);
+	}
 
 	if (!rq->q->mq_ops)
 		dm_old_requeue_request(rq);
@@ -388,14 +330,11 @@ static void dm_softirq_done(struct request *rq)
 	if (!clone) {
 		rq_end_stats(tio->md, rq);
 		rw = rq_data_dir(rq);
-		if (!rq->q->mq_ops) {
+		if (!rq->q->mq_ops)
 			blk_end_request_all(rq, tio->error);
-			rq_completed(tio->md, rw, false);
-			free_old_rq_tio(tio);
-		} else {
+		else
 			blk_mq_end_request(rq, tio->error);
-			rq_completed(tio->md, rw, false);
-		}
+		rq_completed(tio->md, rw, false);
 		return;
 	}
 
@@ -439,16 +378,6 @@ static void end_clone_request(struct request *clone, int error)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	if (!clone->q->mq_ops) {
-		/*
-		 * For just cleaning up the information of the queue in which
-		 * the clone was dispatched.
-		 * The clone is *NOT* freed actually here because it is alloced
-		 * from dm own mempool (RQF_ALLOCED isn't set).
-		 */
-		__blk_put_request(clone->q, clone);
-	}
-
 	/*
 	 * Actual request completion is done in a softirq context which doesn't
 	 * hold the clone's queue lock. Otherwise, deadlock could occur because:
@@ -506,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq,
 	return 0;
 }
 
-static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
-				    struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
-	/*
-	 * Create clone for use with .request_fn request_queue
-	 */
-	struct request *clone;
-
-	clone = alloc_old_clone_request(md, gfp_mask);
-	if (!clone)
-		return NULL;
-
-	blk_rq_init(NULL, clone);
-	if (setup_clone(clone, rq, tio, gfp_mask)) {
-		/* -ENOMEM */
-		free_old_clone_request(md, clone);
-		return NULL;
-	}
-
-	return clone;
-}
-
 static void map_tio_request(struct kthread_work *work);
 
 static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
@@ -549,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
 	kthread_init_work(&tio->work, map_tio_request);
 }
 
-static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
-					       struct mapped_device *md,
-					       gfp_t gfp_mask)
-{
-	struct dm_rq_target_io *tio;
-	int srcu_idx;
-	struct dm_table *table;
-
-	tio = alloc_old_rq_tio(md, gfp_mask);
-	if (!tio)
-		return NULL;
-
-	init_tio(tio, rq, md);
-
-	table = dm_get_live_table(md, &srcu_idx);
-	/*
-	 * Must clone a request if this .request_fn DM device
-	 * is stacked on .request_fn device(s).
-	 */
-	if (!dm_table_all_blk_mq_devices(table)) {
-		if (!clone_old_rq(rq, md, tio, gfp_mask)) {
-			dm_put_live_table(md, srcu_idx);
-			free_old_rq_tio(tio);
-			return NULL;
-		}
-	}
-	dm_put_live_table(md, srcu_idx);
-
-	return tio;
-}
-
-/*
- * Called with the queue lock held.
- */
-static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_rq_target_io *tio;
-
-	if (unlikely(rq->special)) {
-		DMWARN("Already has something in rq->special.");
-		return BLKPREP_KILL;
-	}
-
-	tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
-	if (!tio)
-		return BLKPREP_DEFER;
-
-	rq->special = tio;
-	rq->rq_flags |= RQF_DONTPREP;
-
-	return BLKPREP_OK;
-}
-
 /*
  * Returns:
  * DM_MAPIO_* : the request has been processed as indicated
@@ -617,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio)
 	struct request *rq = tio->orig;
 	struct request *clone = NULL;
 
-	if (tio->clone) {
-		clone = tio->clone;
-		r = ti->type->map_rq(ti, clone, &tio->info);
-		if (r == DM_MAPIO_DELAY_REQUEUE)
-			return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
-	} else {
-		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
-		if (r < 0) {
-			/* The target wants to complete the I/O */
-			dm_kill_unmapped_request(rq, r);
-			return r;
-		}
-		if (r == DM_MAPIO_REMAPPED &&
-		    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
-			/* -ENOMEM */
-			ti->type->release_clone_rq(clone);
-			return DM_MAPIO_REQUEUE;
-		}
-	}
-
+	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
 		/* The target has taken the I/O to submit by itself later */
 		break;
 	case DM_MAPIO_REMAPPED:
+		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+			/* -ENOMEM */
+			ti->type->release_clone_rq(clone);
+			return DM_MAPIO_REQUEUE;
+		}
+
 		/* The target has remapped the I/O so dispatch it */
 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
 				     blk_rq_pos(rq));
@@ -700,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	dm_get(md);
 }
 
+static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
+{
+	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+	/*
+	 * Must initialize md member of tio, otherwise it won't
+	 * be available in dm_mq_queue_rq.
+	 */
+	tio->md = md;
+
+	if (md->init_tio_pdu) {
+		/* target-specific per-io data is immediately after the tio */
+		tio->info.ptr = tio + 1;
+	}
+
+	return 0;
+}
+
+static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+{
+	return __dm_rq_init_rq(q->rq_alloc_data, rq);
+}
+
 static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
@@ -794,6 +657,7 @@ static void dm_old_request_fn(struct request_queue *q)
 		dm_start_request(md, rq);
 
 		tio = tio_from_request(rq);
+		init_tio(tio, rq, md);
 		/* Establish tio->ti before queuing work (map_tio_request) */
 		tio->ti = ti;
 		kthread_queue_work(&md->kworker, &tio->work);
@@ -804,10 +668,22 @@ static void dm_old_request_fn(struct request_queue *q)
 /*
  * Fully initialize a .request_fn request-based queue.
  */
-int dm_old_init_request_queue(struct mapped_device *md)
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 {
+	struct dm_target *immutable_tgt;
+
 	/* Fully initialize the queue */
+	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
+	md->queue->rq_alloc_data = md;
 	md->queue->request_fn = dm_old_request_fn;
+	md->queue->init_rq_fn = dm_rq_init_rq;
+
+	immutable_tgt = dm_table_get_immutable_target(t);
+	if (immutable_tgt && immutable_tgt->per_io_data_size) {
+		/* any target-specific per-io data is immediately after the tio */
+		md->queue->cmd_size += immutable_tgt->per_io_data_size;
+		md->init_tio_pdu = true;
+	}
 	if (blk_init_allocated_queue(md->queue) < 0)
 		return -EINVAL;
 
@@ -816,7 +692,6 @@ int dm_old_init_request_queue(struct mapped_device *md)
 
 	dm_init_normal_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
-	blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
 	/* Initialize the request-based DM worker thread */
 	kthread_init_worker(&md->kworker);
@@ -837,21 +712,7 @@ static int dm_mq_init_request(void *data, struct request *rq,
 			      unsigned int hctx_idx, unsigned int request_idx,
 			      unsigned int numa_node)
 {
-	struct mapped_device *md = data;
-	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
-
-	/*
-	 * Must initialize md member of tio, otherwise it won't
-	 * be available in dm_mq_queue_rq.
-	 */
-	tio->md = md;
-
-	if (md->init_tio_pdu) {
-		/* target-specific per-io data is immediately after the tio */
-		tio->info.ptr = tio + 1;
-	}
-
-	return 0;
+	return __dm_rq_init_rq(data, rq);
 }
 
 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 4da06cae7bad..f0020d21b95f 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -48,7 +48,7 @@ struct dm_rq_clone_bio_info {
 bool dm_use_blk_mq_default(void);
 bool dm_use_blk_mq(struct mapped_device *md);
 
-int dm_old_init_request_queue(struct mapped_device *md);
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
 void dm_mq_cleanup_mapped_device(struct mapped_device *md);
 
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 710ae28fd618..43d3445b121d 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,12 +131,6 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
 	return -EIO;
 }
 
-static int io_err_map_rq(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
-{
-	return -EIO;
-}
-
 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
 				   union map_info *map_context,
 				   struct request **clone)
@@ -161,7 +155,6 @@ static struct target_type error_target = {
 	.ctr = io_err_ctr,
 	.dtr = io_err_dtr,
 	.map = io_err_map,
-	.map_rq = io_err_map_rq,
 	.clone_and_map_rq = io_err_clone_and_map_rq,
 	.release_clone_rq = io_err_release_clone_rq,
 	.direct_access = io_err_direct_access,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3086da5664f3..ff4a29a97ad3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -91,7 +91,6 @@ static int dm_numa_node = DM_NUMA_NODE;
  */
 struct dm_md_mempools {
 	mempool_t *io_pool;
-	mempool_t *rq_pool;
 	struct bio_set *bs;
 };
 
@@ -1419,7 +1418,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
 	if (md->kworker_task)
 		kthread_stop(md->kworker_task);
 	mempool_destroy(md->io_pool);
-	mempool_destroy(md->rq_pool);
 	if (md->bs)
 		bioset_free(md->bs);
 
@@ -1595,12 +1593,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 		goto out;
 	}
 
-	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
+	BUG_ON(!p || md->io_pool || md->bs);
 
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
-	md->rq_pool = p->rq_pool;
-	p->rq_pool = NULL;
 	md->bs = p->bs;
 	p->bs = NULL;
 
@@ -1777,7 +1773,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 
 	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
-		r = dm_old_init_request_queue(md);
+		r = dm_old_init_request_queue(md, t);
 		if (r) {
 			DMERR("Cannot initialize queue for request-based mapped device");
 			return r;
@@ -2493,7 +2489,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 					    unsigned integrity, unsigned per_io_data_size)
 {
 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
-	struct kmem_cache *cachep = NULL;
 	unsigned int pool_size = 0;
 	unsigned int front_pad;
 
@@ -2503,20 +2498,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 	switch (type) {
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
-		cachep = _io_cache;
 		pool_size = dm_get_reserved_bio_based_ios();
 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+
+		pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
+		if (!pools->io_pool)
+			goto out;
 		break;
 	case DM_TYPE_REQUEST_BASED:
-		cachep = _rq_tio_cache;
-		pool_size = dm_get_reserved_rq_based_ios();
-		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-		if (!pools->rq_pool)
-			goto out;
-		/* fall through to setup remaining rq-based pools */
 	case DM_TYPE_MQ_REQUEST_BASED:
-		if (!pool_size)
-			pool_size = dm_get_reserved_rq_based_ios();
+		pool_size = dm_get_reserved_rq_based_ios();
 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
 		break;
@@ -2524,12 +2515,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 		BUG();
 	}
 
-	if (cachep) {
-		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
-		if (!pools->io_pool)
-			goto out;
-	}
-
 	pools->bs = bioset_create_nobvec(pool_size, front_pad);
 	if (!pools->bs)
 		goto out;
@@ -2551,7 +2536,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
 		return;
 
 	mempool_destroy(pools->io_pool);
-	mempool_destroy(pools->rq_pool);
 
 	if (pools->bs)
 		bioset_free(pools->bs);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f0aad08b9654..f298b01f7ab3 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -95,8 +95,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
 /*
  * To check whether the target type is request-based or not (bio-based).
  */
-#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
-				     ((t)->type->clone_and_map_rq != NULL))
+#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)
 
 /*
  * To check whether the target type is a hybrid (capable of being
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ef7962e84444..a7e6903866fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -55,8 +55,6 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
  * = 2: The target wants to push back the io
  */
 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
-typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
-				  union map_info *map_context);
 typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
 					    struct request *rq,
 					    union map_info *map_context,
@@ -163,7 +161,6 @@ struct target_type {
 	dm_ctr_fn ctr;
 	dm_dtr_fn dtr;
 	dm_map_fn map;
-	dm_map_request_fn map_rq;
 	dm_clone_and_map_request_fn clone_and_map_rq;
 	dm_release_clone_request_fn release_clone_rq;
 	dm_endio_fn end_io;