author		Mike Snitzer <snitzer@redhat.com>	2016-09-09 19:24:57 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2016-09-14 13:56:38 -0400
commit		a8ac51e4ab97765838ae6a07d6ff7f7bfaaa0ea3 (patch)
tree		1bc9e863ee44b4991b9c467659503f681bb4398e
parent		9f4c3f874a3ab8fb845dd2f04f4396ebc5c1f225 (diff)
dm rq: add DM_MAPIO_DELAY_REQUEUE to delay requeue of blk-mq requests
Otherwise blk-mq will immediately dispatch requests that are requeued
via a BLK_MQ_RQ_QUEUE_BUSY return from blk_mq_ops .queue_rq.

Delayed requeue is implemented using blk_mq_delay_kick_requeue_list()
with a delay of 5 secs.  In the context of DM multipath (all paths
down) it doesn't make any sense to requeue more quickly.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--	drivers/md/dm-rq.c		32
-rw-r--r--	include/linux/device-mapper.h	 1
2 files changed, 19 insertions(+), 14 deletions(-)
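For illustration, a minimal sketch of how a request-based target's .map_rq
hook could use the new return value for the all-paths-down case described
above; the target and its all_paths_down() helper are hypothetical
stand-ins, not part of this patch:

	/*
	 * Hypothetical request-based target map function: when no usable
	 * path remains, ask DM core for a delayed requeue instead of an
	 * immediate one.  all_paths_down() stands in for the target's
	 * real path-state check.
	 */
	static int example_map_rq(struct dm_target *ti, struct request *clone,
				  union map_info *map_context)
	{
		if (all_paths_down(ti))
			return DM_MAPIO_DELAY_REQUEUE;	/* blk-mq: requeue after 5 secs */

		/* ... remap the clone to a live underlying device ... */
		return DM_MAPIO_REMAPPED;
	}

As the dm-rq.c hunks below show, the old .request_fn path translates
DM_MAPIO_DELAY_REQUEUE into an ordinary immediate DM_MAPIO_REQUEUE, so only
blk-mq devices get the delay.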
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 0d301d5a4d0b..dbced7b15931 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -336,20 +336,21 @@ static void dm_old_requeue_request(struct request *rq)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_mq_requeue_request(struct request *rq)
+static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
 	blk_mq_requeue_request(rq);
+
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (!blk_queue_stopped(q))
-		blk_mq_kick_requeue_list(q);
+		blk_mq_delay_kick_requeue_list(q, msecs);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 static void dm_requeue_original_request(struct mapped_device *md,
-					struct request *rq)
+					struct request *rq, bool delay_requeue)
 {
 	int rw = rq_data_dir(rq);
 
@@ -359,7 +360,7 @@ static void dm_requeue_original_request(struct mapped_device *md,
 	if (!rq->q->mq_ops)
 		dm_old_requeue_request(rq);
 	else
-		dm_mq_requeue_request(rq);
+		dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
 
 	rq_completed(md, rw, false);
 }
@@ -389,7 +390,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 		return;
 	else if (r == DM_ENDIO_REQUEUE)
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(tio->md, tio->orig);
+		dm_requeue_original_request(tio->md, tio->orig, false);
 	else {
 		DMWARN("unimplemented target endio return value: %d", r);
 		BUG();
@@ -629,8 +630,8 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
 
 /*
  * Returns:
- * 0                : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * DM_MAPIO_*       : the request has been processed as indicated
+ * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
  * < 0              : the request was completed due to failure
  */
 static int map_request(struct dm_rq_target_io *tio, struct request *rq,
@@ -643,6 +644,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 	if (tio->clone) {
 		clone = tio->clone;
 		r = ti->type->map_rq(ti, clone, &tio->info);
+		if (r == DM_MAPIO_DELAY_REQUEUE)
+			return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
 	} else {
 		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
 		if (r < 0) {
@@ -650,9 +653,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 			dm_kill_unmapped_request(rq, r);
 			return r;
 		}
-		if (r != DM_MAPIO_REMAPPED)
-			return r;
-		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+		if (r == DM_MAPIO_REMAPPED &&
+		    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
 			/* -ENOMEM */
 			ti->type->release_clone_rq(clone);
 			return DM_MAPIO_REQUEUE;
@@ -671,7 +673,10 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		break;
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(md, tio->orig);
+		break;
+	case DM_MAPIO_DELAY_REQUEUE:
+		/* The target wants to requeue the I/O after a delay */
+		dm_requeue_original_request(md, tio->orig, true);
 		break;
 	default:
 		if (r > 0) {
@@ -681,10 +686,9 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 
 		/* The target wants to complete the I/O */
 		dm_kill_unmapped_request(rq, r);
-		return r;
 	}
 
-	return 0;
+	return r;
 }
 
 static void dm_start_request(struct mapped_device *md, struct request *orig)
@@ -727,7 +731,7 @@ static void map_tio_request(struct kthread_work *work)
 	struct mapped_device *md = tio->md;
 
 	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-		dm_requeue_original_request(md, rq, false);
 }
 
 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 91acfce74a22..ef7962e84444 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -590,6 +590,7 @@ extern struct ratelimit_state dm_ratelimit_state;
 #define DM_MAPIO_SUBMITTED	0
 #define DM_MAPIO_REMAPPED	1
 #define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
+#define DM_MAPIO_DELAY_REQUEUE	3
 
 #define dm_sector_div64(x, y)( \
 { \