aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-mq-sched.c
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2018-07-03 11:03:16 -0400
committerJens Axboe <axboe@kernel.dk>2018-07-09 11:07:53 -0400
commit6e768717304bdbe8d2897ca8298f6b58863fdc41 (patch)
tree64f259002c9bcca0bd1f187cdd7d067e68a29d45 /block/blk-mq-sched.c
parentd893ff86034f7107f89d8b740c2b5902a21a49db (diff)
blk-mq: dequeue request one by one from sw queue if hctx is busy
It won't be efficient to dequeue requests one by one from the sw queue, but we have to do that when the queue is busy for better merge performance. This patch takes the Exponentially Weighted Moving Average (EWMA) to figure out if the queue is busy, then only dequeues requests one by one from the sw queue when the queue is busy. Fixes: b347689ffbca ("blk-mq-sched: improve dispatching from sw queue") Cc: Kashyap Desai <kashyap.desai@broadcom.com> Cc: Laurence Oberman <loberman@redhat.com> Cc: Omar Sandoval <osandov@fb.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Bart Van Assche <bart.vanassche@wdc.com> Cc: Hannes Reinecke <hare@suse.de> Reported-by: Kashyap Desai <kashyap.desai@broadcom.com> Tested-by: Kashyap Desai <kashyap.desai@broadcom.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--block/blk-mq-sched.c11
1 file changed, 2 insertions, 9 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index f3b4b5ceb4d1..fdc129e64cc4 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -206,15 +206,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
206 } 206 }
207 } else if (has_sched_dispatch) { 207 } else if (has_sched_dispatch) {
208 blk_mq_do_dispatch_sched(hctx); 208 blk_mq_do_dispatch_sched(hctx);
209 } else if (q->mq_ops->get_budget) { 209 } else if (hctx->dispatch_busy) {
210 /* 210 /* dequeue request one by one from sw queue if queue is busy */
211 * If we need to get budget before queuing request, we
212 * dequeue request one by one from sw queue for avoiding
213 * to mess up I/O merge when dispatch runs out of resource.
214 *
215 * TODO: get more budgets, and dequeue more requests in
216 * one time.
217 */
218 blk_mq_do_dispatch_ctx(hctx); 211 blk_mq_do_dispatch_ctx(hctx);
219 } else { 212 } else {
220 blk_mq_flush_busy_ctxs(hctx, &rq_list); 213 blk_mq_flush_busy_ctxs(hctx, &rq_list);