author    Tejun Heo <htejun@gmail.com>    2006-01-12 09:39:26 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-01-12 12:05:39 -0500
commit    1bc691d357c646700b9523d2aeca02847d3fb3f4 (patch)
tree      430e13982f894c44e4a33dee188b3c718ec989ed /block
parent    593195f9b2309693f27b402f34573f7920b82c3e (diff)
[PATCH] fix queue stalling while barrier sequencing
If ordered tag isn't supported, request ordering for barrier sequencing is
performed by queue draining, which basically hangs the request queue until
elv_completed_request() reports completion of all previous fs requests.

The condition check in elv_completed_request() was only performed for fs
requests.  If a special request is queued between the last to-be-drained
request and the barrier sequence, draining is never completed and the queue
is stalled forever.

This patch moves the end-of-draining condition check such that it's
performed for all requests.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
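Not part of the commit: the standalone userspace sketch below is a rough model
of the failure mode described above.  All names in it (struct toy_queue,
toy_check_drain(), toy_complete_old()/toy_complete_new(), ...) are invented for
the illustration; it ignores the real ordered-sequence states and the block
layer API, and only shows why the end-of-drain check has to run on completion
of every request, not just accounted fs requests.

/*
 * Simplified model: "fs" requests are counted as in flight, "special"
 * requests are not, and draining finishes only when no fs request is in
 * flight and the next queued request is the barrier.
 */
#include <stdbool.h>
#include <stdio.h>

enum toy_type { TOY_FS, TOY_SPECIAL, TOY_BARRIER };

struct toy_queue {
	enum toy_type head;	/* type of the request at the head of the queue */
	int in_flight;		/* dispatched-but-not-completed fs requests */
	bool draining;		/* barrier waiting for older requests to drain */
	bool drain_done;
};

/* End-of-drain condition: nothing accounted in flight, barrier is next. */
static void toy_check_drain(struct toy_queue *q)
{
	if (q->draining && q->in_flight == 0 && q->head == TOY_BARRIER) {
		q->drain_done = true;
		printf("  drain complete, barrier sequence continues\n");
	}
}

/* Pre-patch completion path: the drain check only runs for fs requests. */
static void toy_complete_old(struct toy_queue *q, enum toy_type t)
{
	if (t == TOY_FS) {
		q->in_flight--;
		toy_check_drain(q);
	}
}

/* Post-patch completion path: the drain check runs for every request. */
static void toy_complete_new(struct toy_queue *q, enum toy_type t)
{
	if (t == TOY_FS)
		q->in_flight--;
	toy_check_drain(q);
}

/*
 * Scenario from the commit message: one fs request is in flight and a
 * special request sits in the queue between it and the barrier.
 */
static void run(const char *label,
		void (*complete)(struct toy_queue *, enum toy_type))
{
	struct toy_queue q = {
		.head = TOY_SPECIAL, .in_flight = 1, .draining = true,
	};

	printf("%s:\n", label);
	/* The last to-be-drained fs request finishes; the special request
	 * is still ahead of the barrier, so draining cannot finish yet. */
	complete(&q, TOY_FS);
	/* The special request is dispatched (barrier moves to the head)
	 * and later completes. */
	q.head = TOY_BARRIER;
	complete(&q, TOY_SPECIAL);
	printf("  drain_done = %s\n", q.drain_done ? "true" : "false (stalled)");
}

int main(void)
{
	run("before the patch", toy_complete_old);
	run("after the patch", toy_complete_new);
	return 0;
}

In the pre-patch path the special request's completion never re-evaluates the
drain condition, so drain_done stays false; in the post-patch path the same
completion finishes the drain, which is what moving the check out of the
blk_account_rq() branch achieves in the real elv_completed_request().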
Diffstat (limited to 'block')
-rw-r--r--    block/elevator.c    20
1 file changed, 10 insertions, 10 deletions
diff --git a/block/elevator.c b/block/elevator.c
index 99a4d7b2f8ad..1d0759178e4b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -610,23 +610,23 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
 	 * request is released from the driver, io must be done
 	 */
 	if (blk_account_rq(rq)) {
-		struct request *first_rq = list_entry_rq(q->queue_head.next);
-
 		q->in_flight--;
+		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+			e->ops->elevator_completed_req_fn(q, rq);
+	}
 
-		/*
-		 * Check if the queue is waiting for fs requests to be
-		 * drained for flush sequence.
-		 */
-		if (q->ordseq && q->in_flight == 0 &&
+	/*
+	 * Check if the queue is waiting for fs requests to be
+	 * drained for flush sequence.
+	 */
+	if (unlikely(q->ordseq)) {
+		struct request *first_rq = list_entry_rq(q->queue_head.next);
+		if (q->in_flight == 0 &&
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
 			q->request_fn(q);
 		}
-
-		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
-			e->ops->elevator_completed_req_fn(q, rq);
 	}
 }
 