author		Tejun Heo <tj@kernel.org>	2010-09-03 05:56:16 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-09-10 06:35:36 -0400
commit		28e7d1845216538303bb95d679d8fd4de50e2f1a (patch)
tree		0ef56dc0d7c894657c4ae71a3e8da6e1164fb933 /block/blk-core.c
parent		dd831006d5be7f74c3fe7aef82380c51c3637960 (diff)
block: drop barrier ordering by queue draining
Filesystems will take all the responsibilities for ordering requests around commit writes and will only indicate how the commit writes themselves should be handled by the block layer. This patch drops barrier ordering by queue draining from the block layer. The ordering-by-draining implementation was somewhat invasive to request handling. A list of notable changes follows.

* Each queue had a 1-bit color which was flipped on each barrier issue, used to track whether a given request was issued before the current barrier. The REQ_ORDERED_COLOR flag and the coloring implementation in __elv_add_request() are removed.

* Requests which shouldn't be processed yet for draining were stalled by returning -EAGAIN from blk_do_ordered(), based on comparing blk_ordered_req_seq() against blk_ordered_cur_seq(). This logic is removed.

* The draining completion logic in elv_completed_request() is removed.

* All barrier sequence requests were queued to the request queue and then trickled to the lower layer according to progress, so request order had to be maintained during requeue. This is replaced by queueing the next request in the barrier sequence only after the current one completes, from blk_ordered_complete_seq(), which removes the need for multiple proxy requests in struct request_queue and for the request sorting logic in the ELEVATOR_INSERT_REQUEUE path of elv_insert().

* As barriers no longer have ordering constraints, there's no need to dump the whole elevator onto the dispatch queue on each barrier. Barriers are inserted at the front instead.

* If other barrier requests reach the front of the dispatch queue while one is already in progress, they are stored in q->pending_barriers and restored to the dispatch queue one by one after each barrier completion from blk_ordered_complete_seq() (see the sketch below).

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
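The pending-barriers restore step in the last bullet can be pictured with a short sketch. This is illustrative only: the helper name restore_one_pending_barrier() is invented here, while q->pending_barriers, blk_ordered_complete_seq(), and ELEVATOR_INSERT_FRONT come from the commit message itself.

/*
 * Sketch, not part of the patch: something along these lines would be
 * driven from blk_ordered_complete_seq() after a barrier sequence
 * finishes.  The helper name is hypothetical.
 */
static void restore_one_pending_barrier(struct request_queue *q)
{
	if (!list_empty(&q->pending_barriers)) {
		/* Oldest deferred barrier comes back first. */
		struct request *rq = list_entry(q->pending_barriers.next,
						struct request, queuelist);

		list_del_init(&rq->queuelist);
		/* Re-insert at the dispatch queue front, preserving issue order. */
		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	}
}

Restoring at most one barrier per completion keeps a single barrier sequence in flight at a time, which matches the one-by-one behavior the commit message describes.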
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index f8d37a8e2c55..d316662682c8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -520,6 +520,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
+	INIT_LIST_HEAD(&q->pending_barriers);
 	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
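For context on the hunk above: INIT_LIST_HEAD() points both links of the list head back at itself, so the freshly initialized q->pending_barriers is seen as empty until the first barrier is deferred onto it. A standalone illustration of that invariant, using a simplified list_head rather than the kernel's:

/*
 * Not kernel code: a minimal model of why list_empty() works on a
 * list head initialized the way INIT_LIST_HEAD() does it.
 */
#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void init_list_head(struct list_head *head)
{
	head->next = head;	/* mirrors the kernel's INIT_LIST_HEAD() */
	head->prev = head;
}

static bool list_empty(const struct list_head *head)
{
	return head->next == head;	/* same test the kernel uses */
}

int main(void)
{
	struct list_head pending_barriers;

	init_list_head(&pending_barriers);
	printf("empty: %d\n", list_empty(&pending_barriers));	/* prints 1 */
	return 0;
}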
@@ -1185,6 +1186,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	const bool sync = (bio->bi_rw & REQ_SYNC);
 	const bool unplug = (bio->bi_rw & REQ_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	int where = ELEVATOR_INSERT_SORT;
 	int rw_flags;
 
 	/* REQ_HARDBARRIER is no more */
@@ -1203,7 +1205,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
+	if (bio->bi_rw & REQ_HARDBARRIER) {
+		where = ELEVATOR_INSERT_FRONT;
+		goto get_rq;
+	}
+
+	if (elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1303,7 +1310,7 @@ get_rq:
 
 	/* insert the request into the elevator */
 	drive_stat_acct(req, 1);
-	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
+	__elv_add_request(q, req, where, 0);
 out:
 	if (unplug || !queue_should_plug(q))
 		__generic_unplug_device(q);