author		Jens Axboe <axboe@kernel.dk>	2018-05-30 03:26:07 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-05-30 12:43:58 -0400
commit		9c5587346490ad4355e8de6ae402b76e55c411d5 (patch)
tree		c3d4ef3bb13f175030152a1721ad1d33c0aa7389 /block/blk-mq-sched.c
parent		5de815a7eedfacf593817ef34634eaa9b75a1482 (diff)
blk-mq: abstract out blk-mq-sched rq list iteration bio merge helper
No functional changes in this patch, just a prep patch for utilizing
this in an IO scheduler.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--	block/blk-mq-sched.c	34
1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 25c14c58385c..b0f2c2a40a0c 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -268,19 +268,16 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
 
 /*
- * Reverse check our software queue for entries that we could potentially
- * merge with. Currently includes a hand-wavy stop count of 8, to not spend
- * too much time checking for merges.
+ * Iterate list of requests and see if we can merge this bio with any
+ * of them.
  */
-static bool blk_mq_attempt_merge(struct request_queue *q,
-				 struct blk_mq_ctx *ctx, struct bio *bio)
+bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
+			   struct bio *bio)
 {
 	struct request *rq;
 	int checked = 8;
 
-	lockdep_assert_held(&ctx->lock);
-
-	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+	list_for_each_entry_reverse(rq, list, queuelist) {
 		bool merged = false;
 
 		if (!checked--)
@@ -305,13 +302,30 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 			continue;
 		}
 
-		if (merged)
-			ctx->rq_merged++;
 		return merged;
 	}
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
+
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+				 struct blk_mq_ctx *ctx, struct bio *bio)
+{
+	lockdep_assert_held(&ctx->lock);
+
+	if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
+		ctx->rq_merged++;
+		return true;
+	}
+
+	return false;
+}
 
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
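
For context on the "prep patch" note above: blk_mq_bio_list_merge() is exported so an IO scheduler can run the same bounded reverse-merge scan over a request list it owns, while the ctx->rq_merged accounting stays in the blk_mq_attempt_merge() wrapper, keeping the helper free of any blk_mq_ctx dependency. A minimal sketch of what a scheduler's ->bio_merge hook might look like on top of this helper; my_sched_hctx, its lock, and its rq_list are hypothetical names for illustration, not part of this patch:

	/* Sketch only: my_sched_hctx and its fields are hypothetical. */
	struct my_sched_hctx {
		spinlock_t lock;		/* protects rq_list */
		struct list_head rq_list;	/* requests held by this scheduler */
	};

	static bool my_sched_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
	{
		struct my_sched_hctx *shd = hctx->sched_data;
		bool merged;

		/*
		 * blk_mq_bio_list_merge() walks the list back to front and
		 * gives up after checking 8 requests; the caller must hold
		 * whatever lock guards the list it passes in.
		 */
		spin_lock(&shd->lock);
		merged = blk_mq_bio_list_merge(hctx->queue, &shd->rq_list, bio);
		spin_unlock(&shd->lock);

		return merged;
	}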