diff options
author | Jens Axboe <jens.axboe@oracle.com> | 2009-04-06 08:48:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-06 11:04:54 -0400 |
commit | aeb6fafb8fa53266d70ca7474fcda2bdaf96524a (patch) | |
tree | ebe8e0c616a9dbfdfe5184b64d0150ea02d3d1b2 /include/linux/blkdev.h | |
parent | 644b2d99b7a8677a56909a7b1fde31677eba4471 (diff) |
block: Add flag for telling the IO schedulers NOT to anticipate more IO
By default, CFQ will anticipate more IO from a given io context if the
previously completed IO was sync. This used to be fine, since the only
sync IO was reads and O_DIRECT writes. But with more "normal" sync writes
being used now, we don't want to anticipate for those.
Add a bio/request flag that informs the IO scheduler that this is a sync
request that we should not idle for. Introduce WRITE_ODIRECT specifically
for O_DIRECT writes, and make sure that the other sync writes set this
flag.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 3 |
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 67dae3bd881c..e03660964e02 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -118,6 +118,7 @@ enum rq_flag_bits { | |||
118 | __REQ_COPY_USER, /* contains copies of user pages */ | 118 | __REQ_COPY_USER, /* contains copies of user pages */ |
119 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ | 119 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ |
120 | __REQ_UNPLUG, /* unplug queue on submission */ | 120 | __REQ_UNPLUG, /* unplug queue on submission */ |
121 | __REQ_NOIDLE, /* Don't anticipate more IO after this one */ | ||
121 | __REQ_NR_BITS, /* stops here */ | 122 | __REQ_NR_BITS, /* stops here */ |
122 | }; | 123 | }; |
123 | 124 | ||
@@ -145,6 +146,7 @@ enum rq_flag_bits { | |||
145 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) | 146 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) |
146 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) | 147 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) |
147 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) | 148 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) |
149 | #define REQ_NOIDLE (1 << __REQ_NOIDLE) | ||
148 | 150 | ||
149 | #define BLK_MAX_CDB 16 | 151 | #define BLK_MAX_CDB 16 |
150 | 152 | ||
@@ -633,6 +635,7 @@ static inline bool rq_is_sync(struct request *rq) | |||
633 | } | 635 | } |
634 | 636 | ||
635 | #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) | 637 | #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) |
638 | #define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE) | ||
636 | 639 | ||
637 | static inline int blk_queue_full(struct request_queue *q, int sync) | 640 | static inline int blk_queue_full(struct request_queue *q, int sync) |
638 | { | 641 | { |