author		Mike Snitzer <snitzer@redhat.com>	2016-09-13 12:16:14 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2016-09-15 11:15:50 -0400
commit		fbc39b4ca3bed38c6d62c658af2157d2ec9efa03 (patch)
tree		2d85e2355285e5a7de97940546e2d10821427e34
parent		a8ac51e4ab97765838ae6a07d6ff7f7bfaaa0ea3 (diff)
dm rq: reduce arguments passed to map_request() and dm_requeue_original_request()
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
 drivers/md/dm-rq.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
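The change follows a simple pattern: struct dm_rq_target_io already carries pointers to the owning mapped_device (tio->md) and to the original request (tio->orig), so callers no longer need to pass them as separate arguments; the callees derive them from the tio. Below is a minimal, standalone sketch of that pattern, not the kernel code itself; the simplified structs and the printf body are illustrative stand-ins for the real DM types and requeue work.

```c
#include <stdio.h>

/* Simplified stand-ins for the real DM structures (illustration only). */
struct mapped_device { const char *name; };
struct request { int id; };

/* The per-request context already holds both pointers. */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct request *orig;
};

/* Before: callers had to pass md and rq alongside (or instead of) the tio. */
static void requeue_before(struct mapped_device *md, struct request *rq,
			   int delay_requeue)
{
	printf("requeue request %d on %s (delay=%d)\n",
	       rq->id, md->name, delay_requeue);
}

/* After: the callee pulls md and rq out of the tio itself. */
static void requeue_after(struct dm_rq_target_io *tio, int delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	printf("requeue request %d on %s (delay=%d)\n",
	       rq->id, md->name, delay_requeue);
}

int main(void)
{
	struct mapped_device md = { "dm-0" };
	struct request rq = { 42 };
	struct dm_rq_target_io tio = { &md, &rq };

	requeue_before(&md, &rq, 0);	/* old calling convention */
	requeue_after(&tio, 0);		/* new calling convention */
	return 0;
}
```

Passing only the tio shortens every call site and removes the risk of a caller handing in an md or rq that does not match the tio.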
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index dbced7b15931..8eefc0ad7a59 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -349,9 +349,10 @@ static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_requeue_original_request(struct mapped_device *md,
-					struct request *rq, bool delay_requeue)
+static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
 {
+	struct mapped_device *md = tio->md;
+	struct request *rq = tio->orig;
 	int rw = rq_data_dir(rq);
 
 	rq_end_stats(md, rq);
@@ -390,7 +391,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 		return;
 	else if (r == DM_ENDIO_REQUEUE)
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(tio->md, tio->orig, false);
+		dm_requeue_original_request(tio, false);
 	else {
 		DMWARN("unimplemented target endio return value: %d", r);
 		BUG();
@@ -634,11 +635,12 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
  * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
  * < 0 : the request was completed due to failure
  */
-static int map_request(struct dm_rq_target_io *tio, struct request *rq,
-		       struct mapped_device *md)
+static int map_request(struct dm_rq_target_io *tio)
 {
 	int r;
 	struct dm_target *ti = tio->ti;
+	struct mapped_device *md = tio->md;
+	struct request *rq = tio->orig;
 	struct request *clone = NULL;
 
 	if (tio->clone) {
@@ -676,7 +678,7 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		break;
 	case DM_MAPIO_DELAY_REQUEUE:
 		/* The target wants to requeue the I/O after a delay */
-		dm_requeue_original_request(md, tio->orig, true);
+		dm_requeue_original_request(tio, true);
 		break;
 	default:
 		if (r > 0) {
@@ -727,11 +729,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
-	struct request *rq = tio->orig;
-	struct mapped_device *md = tio->md;
 
-	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-		dm_requeue_original_request(md, rq, false);
+	if (map_request(tio) == DM_MAPIO_REQUEUE)
+		dm_requeue_original_request(tio, false);
 }
 
 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
@@ -917,7 +917,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	tio->ti = ti;
 
 	/* Direct call is fine since .queue_rq allows allocations */
-	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+	if (map_request(tio) == DM_MAPIO_REQUEUE) {
 		/* Undo dm_start_request() before requeuing */
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);