about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--block/blk-core.c28
-rw-r--r--block/blk-settings.c6
-rw-r--r--include/linux/blkdev.h4
3 files changed, 38 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index c66333d8e48d..b2d0ac8b760e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2100,6 +2100,34 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2100	rq->rq_disk = bio->bi_bdev->bd_disk;
2101}
2102
2103/**
2104 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2105 * @q : the queue of the device being checked
2106 *
2107 * Description:
2108 * Check if underlying low-level drivers of a device are busy.
2109 * If the drivers want to export their busy state, they must set own
2110 * exporting function using blk_queue_lld_busy() first.
2111 *
2112 * Basically, this function is used only by request stacking drivers
2113 * to stop dispatching requests to underlying devices when underlying
2114 * devices are busy. This behavior helps more I/O merging on the queue
2115 * of the request stacking driver and prevents I/O throughput regression
2116 * on burst I/O load.
2117 *
2118 * Return:
2119 * 0 - Not busy (The request stacking driver should dispatch request)
2120 * 1 - Busy (The request stacking driver should stop dispatching request)
2121 */
2122int blk_lld_busy(struct request_queue *q)
2123{
2124 if (q->lld_busy_fn)
2125 return q->lld_busy_fn(q);
2126
2127 return 0;
2128}
2129EXPORT_SYMBOL_GPL(blk_lld_busy);
2130
2131int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2132{
2133	return queue_work(kblockd_workqueue, work);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1d0330d0b40a..b21dcdb64151 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -89,6 +89,12 @@ void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
89}
90EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
91
92void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
93{
94 q->lld_busy_fn = fn;
95}
96EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
97
98/**
99 * blk_queue_make_request - define an alternate make_request function for a device
100 * @q: the request queue for the device to be affected
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0cf3e619fb21..9e0ee1a8254e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -269,6 +269,7 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
269typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
270typedef void (softirq_done_fn)(struct request *);
271typedef int (dma_drain_needed_fn)(struct request *);
272typedef int (lld_busy_fn) (struct request_queue *q);
273
274enum blk_eh_timer_return {
275	BLK_EH_NOT_HANDLED,
@@ -325,6 +326,7 @@ struct request_queue
326	softirq_done_fn		*softirq_done_fn;
327	rq_timed_out_fn		*rq_timed_out_fn;
328	dma_drain_needed_fn	*dma_drain_needed;
329 lld_busy_fn *lld_busy_fn;
330
331	/*
332	 * Dispatch queue sorting
@@ -699,6 +701,7 @@ extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
701extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
702extern void blk_requeue_request(struct request_queue *, struct request *);
703extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
704extern int blk_lld_busy(struct request_queue *q);
705extern int blk_insert_cloned_request(struct request_queue *q,
706				     struct request *rq);
707extern void blk_plug_device(struct request_queue *);
@@ -835,6 +838,7 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
838extern int blk_queue_dma_drain(struct request_queue *q,
839			       dma_drain_needed_fn *dma_drain_needed,
840			       void *buf, unsigned int size);
841extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
842extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
843extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
844extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);