path: root/block/elevator.c
author    Jens Axboe <jaxboe@fusionio.com>    2011-03-08 07:19:51 -0500
committer Jens Axboe <jaxboe@fusionio.com>    2011-03-10 02:45:54 -0500
commit    73c101011926c5832e6e141682180c4debe2cf45 (patch)
tree      b8eeb521a7833cb198d6f39d5a931d820e2a663f /block/elevator.c
parent    a488e74976bf0a9bccecdd094378394942dacef1 (diff)
block: initial patch for on-stack per-task plugging
This patch adds support for creating a queuing context outside of the queue itself. This enables us to batch up pieces of IO before grabbing the block device queue lock and submitting them to the IO scheduler.

The context is created on the stack of the process and assigned in the task structure, so that we can auto-unplug it if we hit a schedule event.

The current queue plugging happens implicitly if IO is submitted to an empty device, yet callers have to remember to unplug that IO when they are going to wait for it. This is an ugly API and has caused bugs in the past. Additionally, it requires hacks in the vm (->sync_page() callback) to handle that logic.

By switching to an explicit plugging scheme we make the API a lot nicer and can get rid of the ->sync_page() hack in the vm.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
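For illustration only (not part of the patch text): a minimal sketch of how a call site can use the explicit plugging scheme described above, assuming the struct blk_plug / blk_start_plug() / blk_finish_plug() interface added on the blk-core side of this series. submit_one_bio() is a hypothetical helper standing in for the caller's actual bio submission.

	struct blk_plug plug;
	int i;

	/* Set up an on-stack plug; it is linked to the task so the
	 * scheduler can flush it automatically if we block first. */
	blk_start_plug(&plug);

	/* Submitted IO is batched on the plug list instead of hitting
	 * the queue lock and the IO scheduler one request at a time. */
	for (i = 0; i < nr_bios; i++)
		submit_one_bio(bios[i]);

	/* Explicit unplug: the batched requests are handed to the
	 * elevator here. */
	blk_finish_plug(&plug);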
Diffstat (limited to 'block/elevator.c')
-rw-r--r--  block/elevator.c  |  6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/block/elevator.c b/block/elevator.c
index f98e92edc937..25713927c0d3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -113,7 +113,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-static inline int elv_try_merge(struct request *__rq, struct bio *bio)
+int elv_try_merge(struct request *__rq, struct bio *bio)
 {
 	int ret = ELEVATOR_NO_MERGE;
 
@@ -421,6 +421,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	struct list_head *entry;
 	int stop_flags;
 
+	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
 
@@ -696,6 +698,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		       int plug)
 {
+	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
 	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||