diff options
author | Kiyoshi Ueda <k-ueda@ct.jp.nec.com> | 2008-10-01 10:12:15 -0400 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-10-09 02:56:20 -0400 |
commit | ef9e3facdf1fe1228721a7c295a76d1b7a0e57ec (patch) | |
tree | 33847b1e267895ea58c2399c06cdd23365c7dc73 /include/linux/blkdev.h | |
parent | 336c3d8ce771608815b65bcfa27a17a83b297328 (diff) |
block: add lld busy state exporting interface
This patch adds a new interface, blk_lld_busy(), to check an lld's
busy state from the block layer.
blk_lld_busy() calls down into low-level drivers to perform the check
if the drivers have set q->lld_busy_fn() using blk_queue_lld_busy().
This resolves a performance problem on request stacking devices below.
Some drivers, like the scsi mid layer, stop dispatching requests when
they detect a busy state on their low-level devices (host/target/device).
It allows other requests to stay in the I/O scheduler's queue
for a chance of merging.
Request stacking drivers like request-based dm should follow
the same logic.
However, there is no generic interface for the stacked device
to check if the underlying device(s) are busy.
If the request stacking driver dispatches and submits requests to
the busy underlying device, the requests will stay in
the underlying device's queue without a chance of merging.
This causes a performance problem under burst I/O load.
With this patch, busy state of the underlying device is exported
via q->lld_busy_fn(). So the request stacking driver can check it
and stop dispatching requests if busy.
The underlying device driver must return the busy state appropriately:
1: when the device driver can't process requests immediately.
0: when the device driver can process requests immediately,
including abnormal situations where the device driver needs
to kill all requests.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 4 |
1 files changed, 4 insertions, 0 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0cf3e619fb21..9e0ee1a8254e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -269,6 +269,7 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, | |||
269 | typedef void (prepare_flush_fn) (struct request_queue *, struct request *); | 269 | typedef void (prepare_flush_fn) (struct request_queue *, struct request *); |
270 | typedef void (softirq_done_fn)(struct request *); | 270 | typedef void (softirq_done_fn)(struct request *); |
271 | typedef int (dma_drain_needed_fn)(struct request *); | 271 | typedef int (dma_drain_needed_fn)(struct request *); |
272 | typedef int (lld_busy_fn) (struct request_queue *q); | ||
272 | 273 | ||
273 | enum blk_eh_timer_return { | 274 | enum blk_eh_timer_return { |
274 | BLK_EH_NOT_HANDLED, | 275 | BLK_EH_NOT_HANDLED, |
@@ -325,6 +326,7 @@ struct request_queue | |||
325 | softirq_done_fn *softirq_done_fn; | 326 | softirq_done_fn *softirq_done_fn; |
326 | rq_timed_out_fn *rq_timed_out_fn; | 327 | rq_timed_out_fn *rq_timed_out_fn; |
327 | dma_drain_needed_fn *dma_drain_needed; | 328 | dma_drain_needed_fn *dma_drain_needed; |
329 | lld_busy_fn *lld_busy_fn; | ||
328 | 330 | ||
329 | /* | 331 | /* |
330 | * Dispatch queue sorting | 332 | * Dispatch queue sorting |
@@ -699,6 +701,7 @@ extern struct request *blk_get_request(struct request_queue *, int, gfp_t); | |||
699 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); | 701 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); |
700 | extern void blk_requeue_request(struct request_queue *, struct request *); | 702 | extern void blk_requeue_request(struct request_queue *, struct request *); |
701 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); | 703 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); |
704 | extern int blk_lld_busy(struct request_queue *q); | ||
702 | extern int blk_insert_cloned_request(struct request_queue *q, | 705 | extern int blk_insert_cloned_request(struct request_queue *q, |
703 | struct request *rq); | 706 | struct request *rq); |
704 | extern void blk_plug_device(struct request_queue *); | 707 | extern void blk_plug_device(struct request_queue *); |
@@ -835,6 +838,7 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); | |||
835 | extern int blk_queue_dma_drain(struct request_queue *q, | 838 | extern int blk_queue_dma_drain(struct request_queue *q, |
836 | dma_drain_needed_fn *dma_drain_needed, | 839 | dma_drain_needed_fn *dma_drain_needed, |
837 | void *buf, unsigned int size); | 840 | void *buf, unsigned int size); |
841 | extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); | ||
838 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); | 842 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); |
839 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); | 843 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); |
840 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | 844 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); |