author		Alan D. Brunelle <Alan.Brunelle@hp.com>	2008-04-29 08:44:19 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-04-29 08:48:55 -0400
commit		ac9fafa1243640349aa481adf473db283a695766 (patch)
tree		155c2371cca8971638d781269f39fa015bc6509c /include
parent		d7e3c3249ef23b4617393c69fe464765b4ff1645 (diff)
block: Skip I/O merges when disabled
The block I/O + elevator + I/O scheduler code spends a lot of time trying to merge I/Os -- rightfully so under "normal" circumstances. However, if one knows that the incoming I/O stream is /very/ random in nature, those cycles are wasted. This patch adds a per-request_queue tunable that (when set) disables merge attempts (beyond the simple one-hit cache check), thus freeing up a non-trivial amount of CPU cycles.

Signed-off-by: Alan D. Brunelle <alan.brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
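The hunks below only add the flag and its test macro to include/linux/blkdev.h; the code that actually consults the flag lives in the block core, outside this include/-limited diffstat. As a rough, hypothetical sketch of the idea (the function name and body here are assumed for illustration, not taken from this patch), a merge entry point would bail out before doing any expensive lookup once the flag is set:

/*
 * Hypothetical sketch -- not part of the include/ hunks shown here.
 * A merge entry point can return early when merging is disabled on
 * the queue, leaving only the cheap one-hit cache check to its caller.
 */
static int example_try_merge(struct request_queue *q, struct request **req,
			     struct bio *bio)
{
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;	/* skip hash/rbtree merge scans */

	/* ... otherwise fall through to the normal merge lookups ... */
	return ELEVATOR_NO_MERGE;
}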
Diffstat (limited to 'include')
-rw-r--r--	include/linux/blkdev.h	| 2
1 files changed, 2 insertions, 0 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 08df1ea8bac4..c09696a90d6a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -408,6 +408,7 @@ struct request_queue
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI	9	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
 					   struct request_queue *q)
@@ -476,6 +477,7 @@ enum {
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
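A queue whose owner knows the workload is highly random can set the new flag itself; the queue_flag_set_unlocked() helper visible in the context of the first hunk is one plausible way to do that at setup time. The driver-side snippet below is a hypothetical usage sketch, not part of this patch:

/* Hypothetical driver-side sketch: disable merge attempts at queue setup. */
static void example_disable_merges(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, q);
}

In mainline kernels the tunable is also exposed to user space as /sys/block/<dev>/queue/nomerges; that sysfs plumbing sits outside the include/ portion of the patch shown here.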