path: root/block/ll_rw_blk.c
author	Jens Axboe <axboe@suse.de>	2006-02-05 02:27:38 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-02-05 14:06:51 -0500
commit	9a7a67af8bb02106f0fb01dd9d237332f874be9a (patch)
tree	6ad56122d38d118e6d53caa94a60037291294a25 /block/ll_rw_blk.c
parent	88a2a4ac6b671a4b0dd5d2d762418904c05f4104 (diff)
[PATCH] fix ordering on requeued request drainage
Previously, if a fs request that was being drained failed and got requeued, blk_do_ordered() didn't allow it to be reissued, which caused a queue stall. This patch makes blk_do_ordered() use the sequence of each request to determine whether it can be issued. This fixes the bug and simplifies the code.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
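In essence, the patch gates each request by comparing its position in the barrier sequence against the queue's current position, so a requeued request (whose sequence is at or before the current one) is reissued rather than held. A minimal annotated sketch of that comparison, paraphrasing the drain branch of the patched blk_do_ordered() shown in the diff below (blk_ordered_req_seq() and blk_ordered_cur_seq() are the existing accessors for a request's and the queue's sequence position):

	/* Drain-ordered queue: hold back only requests whose turn has
	 * not yet come.  A requeued request sits at or before the
	 * current sequence position, so it passes this test and is
	 * reissued, avoiding the stall the old allowed_rq logic caused.
	 */
	if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
		*rqp = NULL;	/* not this request's turn yet; hold it */
	return 1;		/* caller proceeds; rq was consumed or held */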
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c | 38
1 file changed, 16 insertions(+), 22 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e5aad8314585..ee5ed98db4cd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -508,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-	struct request *rq = *rqp, *allowed_rq;
+	struct request *rq = *rqp;
 	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
@@ -532,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 		}
 	}
 
+	/*
+	 * Ordered sequence in progress
+	 */
+
+	/* Special requests are not subject to ordering rules. */
+	if (!blk_fs_request(rq) &&
+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+		return 1;
+
 	if (q->ordered & QUEUE_ORDERED_TAG) {
+		/* Ordered by tag.  Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
-		return 1;
+	} else {
+		/* Ordered by draining.  Wait for turn. */
+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+			*rqp = NULL;
 	}
 
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
-		allowed_rq = &q->pre_flush_rq;
-		break;
-	case QUEUE_ORDSEQ_BAR:
-		allowed_rq = &q->bar_rq;
-		break;
-	case QUEUE_ORDSEQ_POSTFLUSH:
-		allowed_rq = &q->post_flush_rq;
-		break;
-	default:
-		allowed_rq = NULL;
-		break;
-	}
-
-	if (rq != allowed_rq &&
-	    (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-	     rq == &q->post_flush_rq))
-		*rqp = NULL;
-
 	return 1;
 }
 