author		Ming Lei <ming.lei@redhat.com>	2018-12-17 10:44:05 -0500
committer	Jens Axboe <axboe@kernel.dk>	2018-12-17 13:19:54 -0500
commit		c16d6b5a9f47d0e581882269fca1d73be60208b2 (patch)
tree		778a7ca836078f6cca15630f25b0d652320f5cb8 /block/blk-mq-sched.c
parent		7211aef86f79583e59b88a0aba0bc830566f7e8e (diff)
blk-mq: fix dispatch from sw queue
When a request is added to the rq list of a sw queue (ctx), the rq may be
for a different type of hctx, especially after support for multiple queue
maps was introduced. So when dispatching requests from a sw queue via
blk_mq_flush_busy_ctxs() or blk_mq_dequeue_from_ctx(), a request belonging
to a different hctx queue type can be dispatched to the current hctx when
the read or poll queue is enabled.

This patch fixes the issue by introducing per-queue-type lists.

Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>

Changed by me to not use separately cacheline aligned lists; just place
them all in the same cacheline where we previously had just the one list
and lock.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
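To illustrate the idea behind the patch, here is a minimal user-space C
model of per-queue-type sw queue lists. The HCTX_TYPE_* values and the
rq_lists[] array mirror the kernel's enum hctx_type and the field this
patch adds to struct blk_mq_ctx, but struct sw_ctx, struct hw_ctx,
ctx_insert() and flush_busy_ctx() are simplified stand-ins invented for
this sketch, not the kernel API. The point it demonstrates: dispatching on
behalf of one hctx only drains the list indexed by that hctx's type, so a
request queued for, say, the poll hctx can no longer be pulled into the
default hctx.

/*
 * Simplified model only: one request list per hctx type in each software
 * queue (ctx), so flushing a given hardware queue only sees requests of
 * its own type.
 */
#include <stdio.h>

enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,
	HCTX_MAX_TYPES,
};

struct request {
	int tag;
	enum hctx_type type;
	struct request *next;
};

/* Software queue: one request list per hctx type (the patch's rq_lists[]). */
struct sw_ctx {
	struct request *rq_lists[HCTX_MAX_TYPES];
};

/* Hardware queue context: only its type matters for this model. */
struct hw_ctx {
	enum hctx_type type;
};

/* Add a request to the sw queue list that matches the request's type. */
static void ctx_insert(struct sw_ctx *ctx, struct request *rq)
{
	rq->next = ctx->rq_lists[rq->type];
	ctx->rq_lists[rq->type] = rq;
}

/*
 * Dispatch from the sw queue on behalf of one hctx: drain only the list
 * indexed by hctx->type, so a request of another type is never handed to
 * this hctx (the bug the patch fixes).
 */
static void flush_busy_ctx(struct sw_ctx *ctx, struct hw_ctx *hctx)
{
	struct request *rq = ctx->rq_lists[hctx->type];

	ctx->rq_lists[hctx->type] = NULL;
	for (; rq; rq = rq->next)
		printf("hctx type %d dispatches rq %d\n", hctx->type, rq->tag);
}

int main(void)
{
	struct sw_ctx ctx = { { NULL } };
	struct hw_ctx def = { HCTX_TYPE_DEFAULT }, poll = { HCTX_TYPE_POLL };
	struct request r1 = { 1, HCTX_TYPE_DEFAULT, NULL };
	struct request r2 = { 2, HCTX_TYPE_POLL, NULL };

	ctx_insert(&ctx, &r1);
	ctx_insert(&ctx, &r2);

	flush_busy_ctx(&ctx, &def);	/* dispatches rq 1 only */
	flush_busy_ctx(&ctx, &poll);	/* dispatches rq 2 only */
	return 0;
}

Built with any C compiler, the example prints that rq 1 is dispatched only
by the default hctx and rq 2 only by the poll hctx; with a single shared
list, either flush could have picked up both.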
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--	block/blk-mq-sched.c	11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 056fa9baf44e..140933e4a7d1 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -302,11 +302,14 @@ EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
  * too much time checking for merges.
  */
 static bool blk_mq_attempt_merge(struct request_queue *q,
+				 struct blk_mq_hw_ctx *hctx,
 				 struct blk_mq_ctx *ctx, struct bio *bio)
 {
+	enum hctx_type type = hctx->type;
+
 	lockdep_assert_held(&ctx->lock);
 
-	if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
+	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio)) {
 		ctx->rq_merged++;
 		return true;
 	}
@@ -320,17 +323,19 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu);
 	bool ret = false;
+	enum hctx_type type;
 
 	if (e && e->type->ops.bio_merge) {
 		blk_mq_put_ctx(ctx);
 		return e->type->ops.bio_merge(hctx, bio);
 	}
 
+	type = hctx->type;
 	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
-			!list_empty_careful(&ctx->rq_list)) {
+			!list_empty_careful(&ctx->rq_lists[type])) {
 		/* default per sw-queue merge */
 		spin_lock(&ctx->lock);
-		ret = blk_mq_attempt_merge(q, ctx, bio);
+		ret = blk_mq_attempt_merge(q, hctx, ctx, bio);
 		spin_unlock(&ctx->lock);
 	}
 