diff options
author | Tejun Heo <tj@kernel.org> | 2012-03-05 16:14:58 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 15:27:21 -0500 |
commit | d732580b4eb31553c63744a47d590f770cafb8f0 (patch) | |
tree | ea4e8e21df1b639603693e6f5fdfc5a620cd8737 /include/linux/blkdev.h | |
parent | b2fab5acd28ead6f0dd6c3996ba23f0ef1772f15 (diff) |
block: implement blk_queue_bypass_start/end()
Rename and extend elv_quiesce_start/end() to
blk_queue_bypass_start/end(), which are exported and support nesting
via @q->bypass_depth. Also add blk_queue_bypass() to test bypass
state.
This will be further extended and used for blkio_group management.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 5 |
1 files changed, 4 insertions, 1 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 606cf339bb56..315db1d91bc4 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -389,6 +389,8 @@ struct request_queue { | |||
389 | 389 | ||
390 | struct mutex sysfs_lock; | 390 | struct mutex sysfs_lock; |
391 | 391 | ||
392 | int bypass_depth; | ||
393 | |||
392 | #if defined(CONFIG_BLK_DEV_BSG) | 394 | #if defined(CONFIG_BLK_DEV_BSG) |
393 | bsg_job_fn *bsg_job_fn; | 395 | bsg_job_fn *bsg_job_fn; |
394 | int bsg_job_size; | 396 | int bsg_job_size; |
@@ -406,7 +408,7 @@ struct request_queue { | |||
406 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ | 408 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ |
407 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ | 409 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ |
408 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ | 410 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ |
409 | #define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */ | 411 | #define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ |
410 | #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ | 412 | #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ |
411 | #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ | 413 | #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ |
412 | #define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ | 414 | #define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ |
@@ -494,6 +496,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
494 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) | 496 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) |
495 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 497 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
496 | #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) | 498 | #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) |
499 | #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) | ||
497 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) | 500 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) |
498 | #define blk_queue_noxmerges(q) \ | 501 | #define blk_queue_noxmerges(q) \ |
499 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) | 502 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) |