author    Jens Axboe <jaxboe@fusionio.com>  2011-03-21 05:14:27 -0400
committer Jens Axboe <jaxboe@fusionio.com>  2011-03-21 05:14:27 -0400
commit    5e84ea3a9c662dc2d7a48703a4468fad954a3b7f
tree      3fa0fb26a7c8a970213584104cc2498ef46d60a3
parent    4345caba340f051e10847924fc078ae18ed6695c
block: attempt to merge with existing requests on plug flush
One of the disadvantages of on-stack plugging is that we can
potentially lose out on merging, since IO held on a plug isn't
visible to other tasks. Right now, when we flush the on-stack plugs,
we don't check whether the queue already holds suitable merge
candidates. Correct this by adding a new insert variant,
ELEVATOR_INSERT_SORT_MERGE. It works just like ELEVATOR_INSERT_SORT,
but first checks whether the request can be merged with an existing
one; the sorted insertion is only done if merging fails.
This fixes a regression with multiple processes issuing IO that
can be merged.
Thanks to Shaohua Li <shaohua.li@intel.com> for testing and fixing
an accounting bug.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
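
For context, a hedged sketch (not part of this patch) of how a submitter
batches IO under an on-stack plug. blk_start_plug()/blk_finish_plug() come
from the on-stack plugging series this commit builds on; the submit_batch()
helper itself is hypothetical.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* IO queues up on the plug list */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	/*
	 * Flushing the plug hands each held request to the elevator with
	 * ELEVATOR_INSERT_SORT_MERGE, so it can still merge with requests
	 * that other tasks queued while we were plugged.
	 */
	blk_finish_plug(&plug);
}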
-rw-r--r--  block/blk-core.c          |  2
-rw-r--r--  block/blk-merge.c         |  6
-rw-r--r--  block/blk.h               |  2
-rw-r--r--  block/elevator.c          | 52
-rw-r--r--  include/linux/elevator.h  |  1
5 files changed, 59 insertions, 4 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index e1fcf7a24668..525693237a4a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2685,7 +2685,7 @@ static void flush_plug_list(struct blk_plug *plug)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
 	}
 
 	if (q) {
diff --git a/block/blk-merge.c b/block/blk-merge.c
index ea85e20d5e94..cfcc37cb222b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -465,3 +465,9 @@ int attempt_front_merge(struct request_queue *q, struct request *rq)
 
 	return 0;
 }
+
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			  struct request *next)
+{
+	return attempt_merge(q, rq, next);
+}
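
Worth noting as a design choice: attempt_merge() is file-local (static) to
block/blk-merge.c, so rather than un-staticing it the patch re-exports it
through the thin blk_attempt_req_merge() wrapper, declared in the
block-layer-private block/blk.h in the next hunk. That keeps the merge
internals out of the public headers while letting elevator.c reuse them.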
diff --git a/block/blk.h b/block/blk.h
index 49d21af81d07..c8db371a921d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -103,6 +103,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio);
 int attempt_back_merge(struct request_queue *q, struct request *rq);
 int attempt_front_merge(struct request_queue *q, struct request *rq);
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			  struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
 
diff --git a/block/elevator.c b/block/elevator.c
index 542ce826b401..c387d3168734 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -521,6 +521,40 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }
 
+/*
+ * Attempt to do an insertion back merge. Only check for the case where
+ * we can append 'rq' to an existing request, so we can throw 'rq' away
+ * afterwards.
+ *
+ * Returns true if we merged, false otherwise
+ */
+static bool elv_attempt_insert_merge(struct request_queue *q,
+				     struct request *rq)
+{
+	struct request *__rq;
+
+	if (blk_queue_nomerges(q))
+		return false;
+
+	/*
+	 * First try one-hit cache.
+	 */
+	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+		return true;
+
+	if (blk_queue_noxmerges(q))
+		return false;
+
+	/*
+	 * See if our hash lookup can find a potential backmerge.
+	 */
+	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+	if (__rq && blk_attempt_req_merge(q, __rq, rq))
+		return true;
+
+	return false;
+}
+
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
 	struct elevator_queue *e = q->elevator;
@@ -538,14 +572,18 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 			struct request *next)
 {
 	struct elevator_queue *e = q->elevator;
+	const int next_sorted = next->cmd_flags & REQ_SORTED;
 
-	if (e->ops->elevator_merge_req_fn)
+	if (next_sorted && e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
 
 	elv_rqhash_reposition(q, rq);
-	elv_rqhash_del(q, next);
 
-	q->nr_sorted--;
+	if (next_sorted) {
+		elv_rqhash_del(q, next);
+		q->nr_sorted--;
+	}
+
 	q->last_merge = rq;
 }
 
@@ -647,6 +685,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		__blk_run_queue(q, false);
 		break;
 
+	case ELEVATOR_INSERT_SORT_MERGE:
+		/*
+		 * If we succeed in merging this request with one in the
+		 * queue already, we are done - rq has now been freed,
+		 * so no need to do anything further.
+		 */
+		if (elv_attempt_insert_merge(q, rq))
+			break;
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
 		       !(rq->cmd_flags & REQ_DISCARD));
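
Why probing the hash with blk_rq_pos(rq) yields a back-merge candidate: the
elevator hash keys each request by its end sector (rq_hash_key() is
blk_rq_pos() + blk_rq_sectors()), so any hit is a request that ends exactly
where rq begins. A standalone toy model of that keying, with purely
illustrative names (none of this is kernel API):

#include <stdbool.h>

typedef unsigned long long sector_t;

/* Illustrative stand-in for struct request: start sector plus length,
 * mirroring blk_rq_pos() and blk_rq_sectors(). */
struct toy_rq {
	sector_t pos;
	unsigned int sectors;
};

/* The elevator hash keys a request by where it *ends* on disk. */
static sector_t toy_hash_key(const struct toy_rq *rq)
{
	return rq->pos + rq->sectors;
}

/* Appending 'next' to 'rq' (a back merge) is only possible when rq ends
 * exactly where next begins. Hence elv_attempt_insert_merge() probes the
 * hash with the start sector of the request it wants to dispose of:
 * any hit is, by construction, a merge candidate. */
static bool toy_can_back_merge(const struct toy_rq *rq,
			       const struct toy_rq *next)
{
	return toy_hash_key(rq) == next->pos;
}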
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ec6f72b84477..d93efcc44570 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -166,6 +166,7 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
 #define ELEVATOR_INSERT_SORT	3
 #define ELEVATOR_INSERT_REQUEUE	4
 #define ELEVATOR_INSERT_FLUSH	5
+#define ELEVATOR_INSERT_SORT_MERGE	6
 
 /*
  * return values from elevator_may_queue_fn
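
Two control-flow details worth noting. First, the new
ELEVATOR_INSERT_SORT_MERGE case deliberately falls through into
ELEVATOR_INSERT_SORT when no merge partner is found, so the request still
gets a normal sorted insertion. Second, elv_merge_requests() now only
removes 'next' from the hash and decrements nr_sorted when 'next' actually
carries REQ_SORTED: a plug-flushed request that merges before ever being
sorted was never counted, which appears to be the accounting bug credited
to Shaohua Li above. A minimal standalone sketch of the fall-through shape,
with hypothetical names:

#include <stdbool.h>

/* Purely illustrative types and stubs; none of this is kernel API. */
struct toy_queue { int nr_sorted; };
struct toy_rq { int dummy; };

static bool toy_try_merge(struct toy_queue *q, struct toy_rq *rq)
{
	(void)q;
	(void)rq;
	return false;	/* pretend no merge partner was found */
}

static void toy_sort_insert(struct toy_queue *q, struct toy_rq *rq)
{
	(void)rq;
	q->nr_sorted++;	/* only sorted insertions are accounted */
}

enum { TOY_INSERT_SORT = 3, TOY_INSERT_SORT_MERGE = 6 };

static void toy_insert(struct toy_queue *q, struct toy_rq *rq, int where)
{
	switch (where) {
	case TOY_INSERT_SORT_MERGE:
		/* On success rq has been absorbed and freed: stop here. */
		if (toy_try_merge(q, rq))
			break;
		/* fall through: no partner, do a plain sorted insert */
	case TOY_INSERT_SORT:
		toy_sort_insert(q, rq);
		break;
	}
}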