about summary refs log tree commit diff stats
path: root/block/elevator.c
diff options
context:
space:
mode:
authorShaohua Li <shli@fusionio.com>2012-11-09 02:44:27 -0500
committerJens Axboe <axboe@kernel.dk>2012-11-09 02:44:27 -0500
commitbee0393cc12b6d8f10e884e555a095e050e0b2b9 (patch)
treeeabe7526c63a6a965cf0aa5574c222ff2f431e13 /block/elevator.c
parent3d106fba2e7eb6967b1e2cc147a6894ec4307cef (diff)
block: recursive merge requests
In a workload, thread 1 accesses a, a+2, ..., and thread 2 accesses a+1, a+3, .... When the requests are flushed to the queue, a and a+1 are merged into (a, a+1), and a+2 and a+3 into (a+2, a+3), but (a, a+1) and (a+2, a+3) aren't merged. If we do recursive merging for such interleaved accesses, some workloads' throughput improves. A recent workload I'm checking on is swap; the change below boosts throughput by around 5% ~ 10%. Signed-off-by: Shaohua Li <shli@fusionio.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/elevator.c')
-rw-r--r--block/elevator.c16
1 file changed, 12 insertions, 4 deletions
diff --git a/block/elevator.c b/block/elevator.c
index 9b1d42b62f20..9edba1b8323e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -458,6 +458,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 					struct request *rq)
 {
 	struct request *__rq;
+	bool ret;
 
 	if (blk_queue_nomerges(q))
 		return false;
@@ -471,14 +472,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 	if (blk_queue_noxmerges(q))
 		return false;
 
+	ret = false;
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
-	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
-	if (__rq && blk_attempt_req_merge(q, __rq, rq))
-		return true;
+	while (1) {
+		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+			break;
 
-	return false;
+		/* The merged request could be merged with others, try again */
+		ret = true;
+		rq = __rq;
+	}
+
+	return ret;
 }
 
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)