path: root/block/elevator.c
author    Tejun Heo <tj@kernel.org>    2010-09-03 05:56:16 -0400
committer Jens Axboe <jaxboe@fusionio.com>    2010-09-10 06:35:36 -0400
commit    28e7d1845216538303bb95d679d8fd4de50e2f1a (patch)
tree      0ef56dc0d7c894657c4ae71a3e8da6e1164fb933 /block/elevator.c
parent    dd831006d5be7f74c3fe7aef82380c51c3637960 (diff)
block: drop barrier ordering by queue draining
Filesystems will take all responsibility for ordering requests around commit writes and will only indicate how the commit writes themselves should be handled by the block layer. This patch drops barrier ordering by queue draining from the block layer. The ordering-by-draining implementation was somewhat invasive to request handling. A list of notable changes follows.

* Each queue has a 1-bit color which is flipped on each barrier issue. This is used to track whether a given request was issued before the current barrier or not. The REQ_ORDERED_COLOR flag and the coloring implementation in __elv_add_request() are removed.

* Requests which shouldn't be processed yet for draining were stalled by returning -EAGAIN from blk_do_ordered() according to the test result between blk_ordered_req_seq() and blk_ordered_cur_seq(). This logic is removed.

* The draining completion logic in elv_completed_request() is removed.

* All barrier sequence requests were queued to the request queue and then trickled to the lower layer according to progress, and thus maintaining request order during requeue was necessary. This is replaced by queueing the next request in the barrier sequence only after the current one is complete, from blk_ordered_complete_seq(), which removes the need for multiple proxy requests in struct request_queue and the request sorting logic in the ELEVATOR_INSERT_REQUEUE path of elv_insert().

* As barriers no longer have ordering constraints, there's no need to dump the whole elevator onto the dispatch queue on each barrier. Insert barriers at the front instead.

* If other barrier requests come to the front of the dispatch queue while one is already in progress, they are stored in q->pending_barriers and restored to the dispatch queue one by one after each barrier completion from blk_ordered_complete_seq() (a simplified sketch of this follows below).

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
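The last bullet is the only new ordering-related state this approach keeps: barriers that arrive while another barrier is in flight are parked on q->pending_barriers. Below is a minimal illustrative sketch of that parking/restoring idea. The field name comes from the commit message; the helpers park_barrier() and restore_one_barrier() are hypothetical, and the real logic lives in the block barrier/flush code, not in elevator.c.

/*
 * Illustrative sketch only -- not code from this patch.  Assumes
 * q->pending_barriers is a list_head on struct request_queue, as the
 * commit message above implies.
 */
static void park_barrier(struct request_queue *q, struct request *rq)
{
	/* a barrier arrived while another one is in flight: park it */
	list_move_tail(&rq->queuelist, &q->pending_barriers);
}

static void restore_one_barrier(struct request_queue *q)
{
	struct request *rq;

	if (list_empty(&q->pending_barriers))
		return;

	/* move the oldest parked barrier back to the front of the dispatch queue */
	rq = list_entry_rq(q->pending_barriers.next);
	list_move(&rq->queuelist, &q->queue_head);
}

Per the commit message, something along the lines of restore_one_barrier() would run from blk_ordered_complete_seq() after each barrier completes.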
Diffstat (limited to 'block/elevator.c')
-rw-r--r--  block/elevator.c | 79
1 file changed, 9 insertions, 70 deletions
diff --git a/block/elevator.c b/block/elevator.c
index ec585c9554d3..241c69c45c5f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -617,8 +617,6 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-	struct list_head *pos;
-	unsigned ordseq;
 	int unplug_it = 1;
 
 	trace_block_rq_insert(q, rq);
@@ -626,9 +624,16 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	rq->q = q;
 
 	switch (where) {
+	case ELEVATOR_INSERT_REQUEUE:
+		/*
+		 * Most requeues happen because of a busy condition,
+		 * don't force unplug of the queue for that case.
+		 * Clear unplug_it and fall through.
+		 */
+		unplug_it = 0;
+
 	case ELEVATOR_INSERT_FRONT:
 		rq->cmd_flags |= REQ_SOFTBARRIER;
-
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
@@ -668,36 +673,6 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		q->elevator->ops->elevator_add_req_fn(q, rq);
 		break;
 
-	case ELEVATOR_INSERT_REQUEUE:
-		/*
-		 * If ordered flush isn't in progress, we do front
-		 * insertion; otherwise, requests should be requeued
-		 * in ordseq order.
-		 */
-		rq->cmd_flags |= REQ_SOFTBARRIER;
-
-		/*
-		 * Most requeues happen because of a busy condition,
-		 * don't force unplug of the queue for that case.
-		 */
-		unplug_it = 0;
-
-		if (q->ordseq == 0) {
-			list_add(&rq->queuelist, &q->queue_head);
-			break;
-		}
-
-		ordseq = blk_ordered_req_seq(rq);
-
-		list_for_each(pos, &q->queue_head) {
-			struct request *pos_rq = list_entry_rq(pos);
-			if (ordseq <= blk_ordered_req_seq(pos_rq))
-				break;
-		}
-
-		list_add_tail(&rq->queuelist, pos);
-		break;
-
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
 		       __func__, where);
@@ -716,26 +691,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		       int plug)
 {
-	if (q->ordcolor)
-		rq->cmd_flags |= REQ_ORDERED_COLOR;
-
 	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-		/*
-		 * toggle ordered color
-		 */
-		if (rq->cmd_flags & REQ_HARDBARRIER)
-			q->ordcolor ^= 1;
-
-		/*
-		 * barriers implicitly indicate back insertion
-		 */
-		if (where == ELEVATOR_INSERT_SORT)
-			where = ELEVATOR_INSERT_BACK;
-
-		/*
-		 * this request is scheduling boundary, update
-		 * end_sector
-		 */
+		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||
 		    (rq->cmd_flags & REQ_DISCARD)) {
 			q->end_sector = rq_end_sector(rq);
@@ -855,24 +812,6 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		    e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
-
-	/*
-	 * Check if the queue is waiting for fs requests to be
-	 * drained for flush sequence.
-	 */
-	if (unlikely(q->ordseq)) {
-		struct request *next = NULL;
-
-		if (!list_empty(&q->queue_head))
-			next = list_entry_rq(q->queue_head.next);
-
-		if (!queue_in_flight(q) &&
-		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
-		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
-			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			__blk_run_queue(q);
-		}
-	}
 }
 
 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
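For context, a sketch of what issuing a commit write looks like for a caller after this change: with draining gone, the request is simply pushed to the front of the dispatch queue. __elv_add_request() and ELEVATOR_INSERT_FRONT are taken from the diff above; the helper name below is made up for illustration and is not part of this patch.

/*
 * Hypothetical caller, for illustration only.  elv_insert() marks
 * front-inserted requests with REQ_SOFTBARRIER itself (see the second
 * hunk above), so the caller only picks the insertion point.  The
 * caller is assumed to hold q->queue_lock, as __elv_add_request()
 * expects.
 */
static void issue_commit_write(struct request_queue *q, struct request *rq)
{
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}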