author     Jens Axboe <jens.axboe@oracle.com>    2008-09-14 08:55:09 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2008-10-09 02:56:13 -0400
commit     242f9dcb8ba6f68fcd217a119a7648a4f69290e9
tree       1bfe245ffbc50d204d76665cd8f90d85100f86a1    /include/linux/blkdev.h
parent     608aeef17a91747d6303de4df5e2c2e6899a95e8
block: unify request timeout handling
Right now SCSI and others do their own command timeout handling.
Move those bits to the block layer.
Instead of having a timer per command, we try to be a bit more clever
and simply have a single timer per queue. This avoids the overhead of
having to tear down and set up a timer for each command, resulting in
far less timer fiddling.
Signed-off-by: Mike Anderson <andmike@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
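For a driver opting into the unified handling, the conversion boils down to
registering a per-queue timeout handler and a default expiry with the two
helpers this patch exports. A minimal sketch follows, assuming a hypothetical
driver; the mydrv_* names and the 30-second expiry are illustrative, not part
of the patch:

#include <linux/blkdev.h>

/* Hypothetical handler; a sketch of its body follows the diff below.
 * The rq_timed_out_fn typedef and its return type come from this patch. */
static enum blk_eh_timer_return mydrv_rq_timed_out(struct request *rq);

static void mydrv_setup_queue(struct request_queue *q)
{
	/* One timer covers the whole queue: the block layer arms
	 * q->timeout and strings pending requests on q->timeout_list,
	 * instead of tearing down and re-arming a timer per command. */
	blk_queue_rq_timed_out(q, mydrv_rq_timed_out);
	blk_queue_rq_timeout(q, 30 * HZ);	/* illustrative default */
}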
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--   include/linux/blkdev.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9c2549260427..067f28b80072 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -147,6 +147,7 @@ struct request {
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
+	unsigned long atomic_flags;
 
 	/* Maintain bio traversal state for part by part I/O submission.
 	 * hard_* are block layer internals, no driver should touch them!
@@ -214,6 +215,8 @@ struct request {
 	void *data;
 	void *sense;
 
+	unsigned long deadline;
+	struct list_head timeout_list;
 	unsigned int timeout;
 	int retries;
 
@@ -266,6 +269,14 @@ typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 
+enum blk_eh_timer_return {
+	BLK_EH_NOT_HANDLED,
+	BLK_EH_HANDLED,
+	BLK_EH_RESET_TIMER,
+};
+
+typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
+
 enum blk_queue_state {
 	Queue_down,
 	Queue_up,
@@ -311,6 +322,7 @@ struct request_queue
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
 
 	/*
@@ -386,6 +398,10 @@ struct request_queue
 	unsigned int		nr_sorted;
 	unsigned int		in_flight;
 
+	unsigned int		rq_timeout;
+	struct timer_list	timeout;
+	struct list_head	timeout_list;
+
 	/*
 	 * sg stuff
 	 */
@@ -770,6 +786,8 @@ extern int blk_end_request_callback(struct request *rq, int error,
 				unsigned int nr_bytes,
 				int (drv_callback)(struct request *));
 extern void blk_complete_request(struct request *);
+extern void __blk_complete_request(struct request *);
+extern void blk_abort_request(struct request *);
 
 /*
  * blk_end_request() takes bytes instead of sectors as a complete size.
@@ -811,6 +829,8 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
+extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
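To complete the picture, here is an illustrative body for the rq_timed_out_fn
hook added above. The enum names come from this patch; the mydrv_* helpers are
hypothetical, and the mapping of return values to actions is a sketch of how
the handler steers the block layer: re-arm the timer, report the timeout as
handled, or leave the request to the driver's own error handling.

static enum blk_eh_timer_return mydrv_rq_timed_out(struct request *rq)
{
	if (mydrv_hw_still_busy(rq))		/* hypothetical check */
		return BLK_EH_RESET_TIMER;	/* give the command more time */

	if (mydrv_abort_cmd(rq) == 0)		/* hypothetical abort */
		return BLK_EH_HANDLED;		/* timeout dealt with, complete rq */

	return BLK_EH_NOT_HANDLED;		/* driver EH now owns the request */
}

Note also blk_abort_request(), exported above: it gives drivers a way to push
a not-yet-expired request through this same timeout path immediately, rather
than waiting for the per-queue q->rq_timeout to lapse.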