aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorHannes Reinecke <hare@suse.de>2014-02-28 09:33:44 -0500
committerMike Snitzer <snitzer@redhat.com>2014-03-27 16:56:24 -0400
commite809917735ebf1b9a56c24e877ce0d320baee2ec (patch)
tree51d9bde75f399db750f2b143481841b97105fe00 /drivers/md
parent9974fa2c6a7d470ca3c201fe7dbac64bf4dd8d2a (diff)
dm mpath: push back requests instead of queueing
There is no reason why multipath needs to queue requests internally for queue_if_no_path or pg_init; we should rather push them back onto the request queue. And while we're at it we can simplify the conditional statement in map_io() to make it easier to read. Since mpath no longer does internal queuing of I/O the table info no longer emits the internal queue_size. Instead it displays 1 if queuing is being used or 0 if it is not. Signed-off-by: Hannes Reinecke <hare@suse.de> Signed-off-by: Mike Snitzer <snitzer@redhat.com> Reviewed-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/dm-mpath.c114
1 file changed, 36 insertions(+), 78 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c78e6a9e59ce..e1c3ed31c9df 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -93,9 +93,7 @@ struct multipath {
93 unsigned pg_init_count; /* Number of times pg_init called */ 93 unsigned pg_init_count; /* Number of times pg_init called */
94 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ 94 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
95 95
96 unsigned queue_size;
97 struct work_struct process_queued_ios; 96 struct work_struct process_queued_ios;
98 struct list_head queued_ios;
99 97
100 struct work_struct trigger_event; 98 struct work_struct trigger_event;
101 99
@@ -124,6 +122,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
124static void process_queued_ios(struct work_struct *work); 122static void process_queued_ios(struct work_struct *work);
125static void trigger_event(struct work_struct *work); 123static void trigger_event(struct work_struct *work);
126static void activate_path(struct work_struct *work); 124static void activate_path(struct work_struct *work);
125static int __pgpath_busy(struct pgpath *pgpath);
127 126
128 127
129/*----------------------------------------------- 128/*-----------------------------------------------
@@ -195,7 +194,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
195 m = kzalloc(sizeof(*m), GFP_KERNEL); 194 m = kzalloc(sizeof(*m), GFP_KERNEL);
196 if (m) { 195 if (m) {
197 INIT_LIST_HEAD(&m->priority_groups); 196 INIT_LIST_HEAD(&m->priority_groups);
198 INIT_LIST_HEAD(&m->queued_ios);
199 spin_lock_init(&m->lock); 197 spin_lock_init(&m->lock);
200 m->queue_io = 1; 198 m->queue_io = 1;
201 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; 199 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
@@ -368,12 +366,15 @@ failed:
368 */ 366 */
369static int __must_push_back(struct multipath *m) 367static int __must_push_back(struct multipath *m)
370{ 368{
371 return (m->queue_if_no_path != m->saved_queue_if_no_path && 369 return (m->queue_if_no_path ||
372 dm_noflush_suspending(m->ti)); 370 (m->queue_if_no_path != m->saved_queue_if_no_path &&
371 dm_noflush_suspending(m->ti)));
373} 372}
374 373
374#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required)
375
375static int map_io(struct multipath *m, struct request *clone, 376static int map_io(struct multipath *m, struct request *clone,
376 union map_info *map_context, unsigned was_queued) 377 union map_info *map_context)
377{ 378{
378 int r = DM_MAPIO_REMAPPED; 379 int r = DM_MAPIO_REMAPPED;
379 size_t nr_bytes = blk_rq_bytes(clone); 380 size_t nr_bytes = blk_rq_bytes(clone);
@@ -391,37 +392,28 @@ static int map_io(struct multipath *m, struct request *clone,
391 392
392 pgpath = m->current_pgpath; 393 pgpath = m->current_pgpath;
393 394
394 if (was_queued) 395 if (pgpath) {
395 m->queue_size--; 396 if (pg_ready(m)) {
396 397 bdev = pgpath->path.dev->bdev;
397 if (m->pg_init_required) { 398 clone->q = bdev_get_queue(bdev);
398 if (!m->pg_init_in_progress) 399 clone->rq_disk = bdev->bd_disk;
399 queue_work(kmultipathd, &m->process_queued_ios); 400 mpio->pgpath = pgpath;
400 r = DM_MAPIO_REQUEUE; 401 mpio->nr_bytes = nr_bytes;
401 } else if ((pgpath && m->queue_io) || 402 if (pgpath->pg->ps.type->start_io)
402 (!pgpath && m->queue_if_no_path)) { 403 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
403 /* Queue for the daemon to resubmit */ 404 &pgpath->path,
404 list_add_tail(&clone->queuelist, &m->queued_ios); 405 nr_bytes);
405 m->queue_size++; 406 } else {
406 if (!m->queue_io) 407 __pg_init_all_paths(m);
407 queue_work(kmultipathd, &m->process_queued_ios); 408 r = DM_MAPIO_REQUEUE;
408 pgpath = NULL; 409 }
409 r = DM_MAPIO_SUBMITTED; 410 } else {
410 } else if (pgpath) { 411 /* No path */
411 bdev = pgpath->path.dev->bdev; 412 if (__must_push_back(m))
412 clone->q = bdev_get_queue(bdev); 413 r = DM_MAPIO_REQUEUE;
413 clone->rq_disk = bdev->bd_disk; 414 else
414 } else if (__must_push_back(m)) 415 r = -EIO; /* Failed */
415 r = DM_MAPIO_REQUEUE; 416 }
416 else
417 r = -EIO; /* Failed */
418
419 mpio->pgpath = pgpath;
420 mpio->nr_bytes = nr_bytes;
421
422 if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
423 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
424 nr_bytes);
425 417
426 spin_unlock_irqrestore(&m->lock, flags); 418 spin_unlock_irqrestore(&m->lock, flags);
427 419
@@ -443,7 +435,7 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
443 else 435 else
444 m->saved_queue_if_no_path = queue_if_no_path; 436 m->saved_queue_if_no_path = queue_if_no_path;
445 m->queue_if_no_path = queue_if_no_path; 437 m->queue_if_no_path = queue_if_no_path;
446 if (!m->queue_if_no_path && m->queue_size) 438 if (!m->queue_if_no_path)
447 queue_work(kmultipathd, &m->process_queued_ios); 439 queue_work(kmultipathd, &m->process_queued_ios);
448 440
449 spin_unlock_irqrestore(&m->lock, flags); 441 spin_unlock_irqrestore(&m->lock, flags);
@@ -451,40 +443,6 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
451 return 0; 443 return 0;
452} 444}
453 445
454/*-----------------------------------------------------------------
455 * The multipath daemon is responsible for resubmitting queued ios.
456 *---------------------------------------------------------------*/
457
458static void dispatch_queued_ios(struct multipath *m)
459{
460 int r;
461 unsigned long flags;
462 union map_info *info;
463 struct request *clone, *n;
464 LIST_HEAD(cl);
465
466 spin_lock_irqsave(&m->lock, flags);
467 list_splice_init(&m->queued_ios, &cl);
468 spin_unlock_irqrestore(&m->lock, flags);
469
470 list_for_each_entry_safe(clone, n, &cl, queuelist) {
471 list_del_init(&clone->queuelist);
472
473 info = dm_get_rq_mapinfo(clone);
474
475 r = map_io(m, clone, info, 1);
476 if (r < 0) {
477 clear_mapinfo(m, info);
478 dm_kill_unmapped_request(clone, r);
479 } else if (r == DM_MAPIO_REMAPPED)
480 dm_dispatch_request(clone);
481 else if (r == DM_MAPIO_REQUEUE) {
482 clear_mapinfo(m, info);
483 dm_requeue_unmapped_request(clone);
484 }
485 }
486}
487
488static void process_queued_ios(struct work_struct *work) 446static void process_queued_ios(struct work_struct *work)
489{ 447{
490 struct multipath *m = 448 struct multipath *m =
@@ -509,7 +467,7 @@ static void process_queued_ios(struct work_struct *work)
509 467
510 spin_unlock_irqrestore(&m->lock, flags); 468 spin_unlock_irqrestore(&m->lock, flags);
511 if (!must_queue) 469 if (!must_queue)
512 dispatch_queued_ios(m); 470 dm_table_run_md_queue_async(m->ti->table);
513} 471}
514 472
515/* 473/*
@@ -987,7 +945,7 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
987 return DM_MAPIO_REQUEUE; 945 return DM_MAPIO_REQUEUE;
988 946
989 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; 947 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
990 r = map_io(m, clone, map_context, 0); 948 r = map_io(m, clone, map_context);
991 if (r < 0 || r == DM_MAPIO_REQUEUE) 949 if (r < 0 || r == DM_MAPIO_REQUEUE)
992 clear_mapinfo(m, map_context); 950 clear_mapinfo(m, map_context);
993 951
@@ -1056,7 +1014,7 @@ static int reinstate_path(struct pgpath *pgpath)
1056 1014
1057 pgpath->is_active = 1; 1015 pgpath->is_active = 1;
1058 1016
1059 if (!m->nr_valid_paths++ && m->queue_size) { 1017 if (!m->nr_valid_paths++) {
1060 m->current_pgpath = NULL; 1018 m->current_pgpath = NULL;
1061 queue_work(kmultipathd, &m->process_queued_ios); 1019 queue_work(kmultipathd, &m->process_queued_ios);
1062 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { 1020 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
@@ -1435,7 +1393,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
1435 1393
1436 /* Features */ 1394 /* Features */
1437 if (type == STATUSTYPE_INFO) 1395 if (type == STATUSTYPE_INFO)
1438 DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); 1396 DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
1439 else { 1397 else {
1440 DMEMIT("%u ", m->queue_if_no_path + 1398 DMEMIT("%u ", m->queue_if_no_path +
1441 (m->pg_init_retries > 0) * 2 + 1399 (m->pg_init_retries > 0) * 2 +
@@ -1686,7 +1644,7 @@ static int multipath_busy(struct dm_target *ti)
1686 spin_lock_irqsave(&m->lock, flags); 1644 spin_lock_irqsave(&m->lock, flags);
1687 1645
1688 /* pg_init in progress, requeue until done */ 1646 /* pg_init in progress, requeue until done */
1689 if (m->pg_init_in_progress) { 1647 if (!pg_ready(m)) {
1690 busy = 1; 1648 busy = 1;
1691 goto out; 1649 goto out;
1692 } 1650 }
@@ -1739,7 +1697,7 @@ out:
1739 *---------------------------------------------------------------*/ 1697 *---------------------------------------------------------------*/
1740static struct target_type multipath_target = { 1698static struct target_type multipath_target = {
1741 .name = "multipath", 1699 .name = "multipath",
1742 .version = {1, 6, 0}, 1700 .version = {1, 7, 0},
1743 .module = THIS_MODULE, 1701 .module = THIS_MODULE,
1744 .ctr = multipath_ctr, 1702 .ctr = multipath_ctr,
1745 .dtr = multipath_dtr, 1703 .dtr = multipath_dtr,