diff options
author | Jens Axboe <jaxboe@fusionio.com> | 2011-03-21 05:14:27 -0400 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2011-03-21 05:14:27 -0400 |
commit | 5e84ea3a9c662dc2d7a48703a4468fad954a3b7f (patch) | |
tree | 3fa0fb26a7c8a970213584104cc2498ef46d60a3 /block/elevator.c | |
parent | 4345caba340f051e10847924fc078ae18ed6695c (diff) |
block: attempt to merge with existing requests on plug flush
One of the disadvantages of on-stack plugging is that we potentially
lose out on merging since all pending IO isn't always visible to
everybody. When we flush the on-stack plugs, right now we don't do
any checks to see if potential merge candidates could be utilized.
Correct this by adding a new insert variant, ELEVATOR_INSERT_SORT_MERGE.
It works just like ELEVATOR_INSERT_SORT, but first checks whether we can
merge with an existing request before doing the insertion (if we fail
merging).
This fixes a regression with multiple processes issuing IO that
can be merged.
Thanks to Shaohua Li <shaohua.li@intel.com> for testing and fixing
an accounting bug.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/elevator.c')
-rw-r--r-- | block/elevator.c | 52 |
1 files changed, 49 insertions, 3 deletions
diff --git a/block/elevator.c b/block/elevator.c index 542ce826b401..c387d3168734 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -521,6 +521,40 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) | |||
521 | return ELEVATOR_NO_MERGE; | 521 | return ELEVATOR_NO_MERGE; |
522 | } | 522 | } |
523 | 523 | ||
524 | /* | ||
525 | * Attempt to do an insertion back merge. Only check for the case where | ||
526 | * we can append 'rq' to an existing request, so we can throw 'rq' away | ||
527 | * afterwards. | ||
528 | * | ||
529 | * Returns true if we merged, false otherwise | ||
530 | */ | ||
531 | static bool elv_attempt_insert_merge(struct request_queue *q, | ||
532 | struct request *rq) | ||
533 | { | ||
534 | struct request *__rq; | ||
535 | |||
536 | if (blk_queue_nomerges(q)) | ||
537 | return false; | ||
538 | |||
539 | /* | ||
540 | * First try one-hit cache. | ||
541 | */ | ||
542 | if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) | ||
543 | return true; | ||
544 | |||
545 | if (blk_queue_noxmerges(q)) | ||
546 | return false; | ||
547 | |||
548 | /* | ||
549 | * See if our hash lookup can find a potential backmerge. | ||
550 | */ | ||
551 | __rq = elv_rqhash_find(q, blk_rq_pos(rq)); | ||
552 | if (__rq && blk_attempt_req_merge(q, __rq, rq)) | ||
553 | return true; | ||
554 | |||
555 | return false; | ||
556 | } | ||
557 | |||
524 | void elv_merged_request(struct request_queue *q, struct request *rq, int type) | 558 | void elv_merged_request(struct request_queue *q, struct request *rq, int type) |
525 | { | 559 | { |
526 | struct elevator_queue *e = q->elevator; | 560 | struct elevator_queue *e = q->elevator; |
@@ -538,14 +572,18 @@ void elv_merge_requests(struct request_queue *q, struct request *rq, | |||
538 | struct request *next) | 572 | struct request *next) |
539 | { | 573 | { |
540 | struct elevator_queue *e = q->elevator; | 574 | struct elevator_queue *e = q->elevator; |
575 | const int next_sorted = next->cmd_flags & REQ_SORTED; | ||
541 | 576 | ||
542 | if (e->ops->elevator_merge_req_fn) | 577 | if (next_sorted && e->ops->elevator_merge_req_fn) |
543 | e->ops->elevator_merge_req_fn(q, rq, next); | 578 | e->ops->elevator_merge_req_fn(q, rq, next); |
544 | 579 | ||
545 | elv_rqhash_reposition(q, rq); | 580 | elv_rqhash_reposition(q, rq); |
546 | elv_rqhash_del(q, next); | ||
547 | 581 | ||
548 | q->nr_sorted--; | 582 | if (next_sorted) { |
583 | elv_rqhash_del(q, next); | ||
584 | q->nr_sorted--; | ||
585 | } | ||
586 | |||
549 | q->last_merge = rq; | 587 | q->last_merge = rq; |
550 | } | 588 | } |
551 | 589 | ||
@@ -647,6 +685,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) | |||
647 | __blk_run_queue(q, false); | 685 | __blk_run_queue(q, false); |
648 | break; | 686 | break; |
649 | 687 | ||
688 | case ELEVATOR_INSERT_SORT_MERGE: | ||
689 | /* | ||
690 | * If we succeed in merging this request with one in the | ||
691 | * queue already, we are done - rq has now been freed, | ||
692 | * so no need to do anything further. | ||
693 | */ | ||
694 | if (elv_attempt_insert_merge(q, rq)) | ||
695 | break; | ||
650 | case ELEVATOR_INSERT_SORT: | 696 | case ELEVATOR_INSERT_SORT: |
651 | BUG_ON(rq->cmd_type != REQ_TYPE_FS && | 697 | BUG_ON(rq->cmd_type != REQ_TYPE_FS && |
652 | !(rq->cmd_flags & REQ_DISCARD)); | 698 | !(rq->cmd_flags & REQ_DISCARD)); |