aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorKiyoshi Ueda <k-ueda@ct.jp.nec.com>2008-10-01 10:12:15 -0400
committerJens Axboe <jens.axboe@oracle.com>2008-10-09 02:56:20 -0400
commitef9e3facdf1fe1228721a7c295a76d1b7a0e57ec (patch)
tree33847b1e267895ea58c2399c06cdd23365c7dc73 /include
parent336c3d8ce771608815b65bcfa27a17a83b297328 (diff)
block: add lld busy state exporting interface
This patch adds a new interface, blk_lld_busy(), to check lld's busy state from the block layer. blk_lld_busy() calls down into low-level drivers for the checking if the drivers set q->lld_busy_fn() using blk_queue_lld_busy().

This resolves a performance problem on request stacking devices below.

Some drivers like scsi mid layer stop dispatching requests when they detect busy state on their low-level device like host/target/device. It allows other requests to stay in the I/O scheduler's queue for a chance of merging. Request stacking drivers like request-based dm should follow the same logic. However, there is no generic interface for the stacked device to check if the underlying device(s) are busy. If the request stacking driver dispatches and submits requests to the busy underlying device, the requests will stay in the underlying device's queue without a chance of merging. This causes performance problem on burst I/O load.

With this patch, busy state of the underlying device is exported via q->lld_busy_fn(). So the request stacking driver can check it and stop dispatching requests if busy. The underlying device driver must return the busy state appropriately:
    1: when the device driver can't process requests immediately.
    0: when the device driver can process requests immediately, including abnormal situations where the device driver needs to kill all requests.

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'include')
-rw-r--r--include/linux/blkdev.h4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0cf3e619fb21..9e0ee1a8254e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -269,6 +269,7 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
+typedef int (lld_busy_fn) (struct request_queue *q);
 
 enum blk_eh_timer_return {
 	BLK_EH_NOT_HANDLED,
@@ -325,6 +326,7 @@ struct request_queue
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
+	lld_busy_fn		*lld_busy_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -699,6 +701,7 @@ extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
+extern int blk_lld_busy(struct request_queue *q);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_plug_device(struct request_queue *);
@@ -835,6 +838,7 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size);
+extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);