aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOmar Sandoval <osandov@fb.com>2016-06-02 01:18:48 -0400
committerJens Axboe <axboe@fb.com>2016-06-02 13:47:32 -0400
commit87c279e613f848c691111b29d49de8df3f4f56da (patch)
treee703f929d5b33f435b393ffd4bc55623613e5254
parent62a584fe05eef1f80ed49a286a29328f1a224fb9 (diff)
blk-mq: really fix plug list flushing for nomerge queues
Commit 0809e3ac6231 ("block: fix plug list flushing for nomerge queues")
updated blk_mq_make_request() to set request_count even when
blk_queue_nomerges() returns true. However, blk_mq_make_request() only
does limited plugging and doesn't use request_count;
blk_sq_make_request() is the one that should have been fixed. Do that
and get rid of the unnecessary work in the mq version.

Fixes: 0809e3ac6231 ("block: fix plug list flushing for nomerge queues")
Signed-off-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  block/blk-mq.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29cbc1b5fbdb..f9b9049b1284 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q)) {
-		if (blk_attempt_plug_merge(q, bio, &request_count,
-					   &same_queue_rq))
-			return BLK_QC_T_NONE;
-	} else
-		request_count = blk_plug_queued_count(q);
+	if (!is_flush_fua && !blk_queue_nomerges(q) &&
+	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+		return BLK_QC_T_NONE;
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return BLK_QC_T_NONE;
+	if (!is_flush_fua && !blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+			return BLK_QC_T_NONE;
+	} else
+		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))