summaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorJens Axboe <axboe@fb.com>2014-10-29 13:14:52 -0400
committerJens Axboe <axboe@fb.com>2014-10-29 13:14:52 -0400
commit74c450521dd8d245b982da62592a18aa6f88b045 (patch)
treeb45928c02f0ee9051cd5cb257ad4bf1e0df125a8 /block
parent34b48db66e08ca1c1bc07cf305d672ac940268dc (diff)
blk-mq: add a 'list' parameter to ->queue_rq()
Since we have the notion of a 'last' request in a chain, we can use this to have the hardware optimize the issuing of requests. Add a list_head parameter to queue_rq that the driver can use to temporarily store hw commands for issue when 'last' is true. If we are doing a chain of requests, pass in a NULL list for the first request to force issue of that immediately, then batch the remainder for deferred issue until the last request has been sent. Instead of adding yet another argument to the hot ->queue_rq path, encapsulate the passed arguments in a blk_mq_queue_data structure. This is passed as a constant, and has been tested as faster than passing 4 (or even 3) args through ->queue_rq. Update drivers for the new ->queue_rq() prototype. There are no functional changes in this patch for drivers - if they don't use the passed in list, then they will just queue requests individually like before. Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-mq.c29
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 68929bad9a6a..7e5303820452 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -680,6 +680,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
680 struct request_queue *q = hctx->queue; 680 struct request_queue *q = hctx->queue;
681 struct request *rq; 681 struct request *rq;
682 LIST_HEAD(rq_list); 682 LIST_HEAD(rq_list);
683 LIST_HEAD(driver_list);
684 struct list_head *dptr;
683 int queued; 685 int queued;
684 686
685 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); 687 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
@@ -706,16 +708,27 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
706 } 708 }
707 709
708 /* 710 /*
711 * Start off with dptr being NULL, so we start the first request
712 * immediately, even if we have more pending.
713 */
714 dptr = NULL;
715
716 /*
709 * Now process all the entries, sending them to the driver. 717 * Now process all the entries, sending them to the driver.
710 */ 718 */
711 queued = 0; 719 queued = 0;
712 while (!list_empty(&rq_list)) { 720 while (!list_empty(&rq_list)) {
721 struct blk_mq_queue_data bd;
713 int ret; 722 int ret;
714 723
715 rq = list_first_entry(&rq_list, struct request, queuelist); 724 rq = list_first_entry(&rq_list, struct request, queuelist);
716 list_del_init(&rq->queuelist); 725 list_del_init(&rq->queuelist);
717 726
718 ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list)); 727 bd.rq = rq;
728 bd.list = dptr;
729 bd.last = list_empty(&rq_list);
730
731 ret = q->mq_ops->queue_rq(hctx, &bd);
719 switch (ret) { 732 switch (ret) {
720 case BLK_MQ_RQ_QUEUE_OK: 733 case BLK_MQ_RQ_QUEUE_OK:
721 queued++; 734 queued++;
@@ -734,6 +747,13 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
734 747
735 if (ret == BLK_MQ_RQ_QUEUE_BUSY) 748 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
736 break; 749 break;
750
751 /*
752 * We've done the first request. If we have more than 1
753 * left in the list, set dptr to defer issue.
754 */
755 if (!dptr && rq_list.next != rq_list.prev)
756 dptr = &driver_list;
737 } 757 }
738 758
739 if (!queued) 759 if (!queued)
@@ -1153,6 +1173,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1153 } 1173 }
1154 1174
1155 if (is_sync) { 1175 if (is_sync) {
1176 struct blk_mq_queue_data bd = {
1177 .rq = rq,
1178 .list = NULL,
1179 .last = 1
1180 };
1156 int ret; 1181 int ret;
1157 1182
1158 blk_mq_bio_to_request(rq, bio); 1183 blk_mq_bio_to_request(rq, bio);
@@ -1162,7 +1187,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1162 * error (busy), just add it to our list as we previously 1187 * error (busy), just add it to our list as we previously
1163 * would have done 1188 * would have done
1164 */ 1189 */
1165 ret = q->mq_ops->queue_rq(data.hctx, rq, true); 1190 ret = q->mq_ops->queue_rq(data.hctx, &bd);
1166 if (ret == BLK_MQ_RQ_QUEUE_OK) 1191 if (ret == BLK_MQ_RQ_QUEUE_OK)
1167 goto done; 1192 goto done;
1168 else { 1193 else {