author    Jens Axboe <jaxboe@fusionio.com>  2011-03-08 07:19:51 -0500
committer Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:45:54 -0500
commit    73c101011926c5832e6e141682180c4debe2cf45
tree      b8eeb521a7833cb198d6f39d5a931d820e2a663f
parent    a488e74976bf0a9bccecdd094378394942dacef1
block: initial patch for on-stack per-task plugging
This patch adds support for creating a queuing context outside of the
queue itself. This enables us to batch up pieces of IO before grabbing
the block device queue lock and submitting them to the IO scheduler.

The context is created on the stack of the process and assigned in the
task structure, so that we can auto-unplug it if we hit a schedule
event.

The current queue plugging happens implicitly if IO is submitted to an
empty device, yet callers have to remember to unplug that IO when they
are going to wait for it. This is an ugly API and has caused bugs in
the past. Additionally, it requires hacks in the vm (->sync_page()
callback) to handle that logic. By switching to an explicit plugging
scheme we make the API a lot nicer and can get rid of the
->sync_page() hack in the vm.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
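For context, a minimal caller-side sketch of the new API. The
blk_start_plug()/blk_finish_plug() calls and struct blk_plug come from
the hunks below; the submit_batch() helper, the bio array, and the READ
direction are illustrative assumptions, not part of this patch.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/*
 * Illustrative sketch only. IO submitted between blk_start_plug() and
 * blk_finish_plug() is batched on the on-stack plug rather than taking
 * the queue lock per bio; blk_finish_plug() (or an intervening schedule
 * event) submits the whole batch to the IO scheduler.
 */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* plug lives on this stack frame */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* flush the batched IO */
}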
Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f55b2a8b6610..5873037eeb91 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -871,6 +871,31 @@ struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
+struct blk_plug {
+	unsigned long magic;
+	struct list_head list;
+	unsigned int should_sort;
+};
+
+extern void blk_start_plug(struct blk_plug *);
+extern void blk_finish_plug(struct blk_plug *);
+extern void __blk_flush_plug(struct task_struct *, struct blk_plug *);
+
+static inline void blk_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	if (unlikely(plug))
+		__blk_flush_plug(tsk, plug);
+}
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	return plug && !list_empty(&plug->list);
+}
+
 /*
  * tag stuff
  */
@@ -1294,6 +1319,23 @@ static inline long nr_blockdev_pages(void)
 	return 0;
 }
 
+static inline void blk_start_plug(struct list_head *list)
+{
+}
+
+static inline void blk_finish_plug(struct list_head *list)
+{
+}
+
+static inline void blk_flush_plug(struct task_struct *tsk)
+{
+}
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+	return false;
+}
+
 #endif /* CONFIG_BLOCK */
 
 #endif
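Not visible in this header diff: the auto-unplug on a schedule event
that the commit message describes. Below is a simplified sketch of that
scheduler-side check, built from the two inline helpers added above;
the sched_flush_plug() wrapper name is hypothetical, and the full patch
open-codes this logic inside schedule() in kernel/sched.c.

#include <linux/blkdev.h>
#include <linux/sched.h>

/*
 * Hypothetical wrapper sketching what the full patch does in
 * schedule(): if a task is about to sleep while its on-stack plug
 * still holds queued IO, flush the plug first so that IO is actually
 * submitted and the sleeper cannot wait on IO it never sent.
 */
static inline void sched_flush_plug(struct task_struct *tsk)
{
	/* cheap inline check first; the flush itself is the slow path */
	if (blk_needs_flush_plug(tsk))
		blk_flush_plug(tsk);
}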