author		Jens Axboe <jens.axboe@oracle.com>	2008-09-14 08:55:09 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-10-09 02:56:13 -0400
commit		242f9dcb8ba6f68fcd217a119a7648a4f69290e9 (patch)
tree		1bfe245ffbc50d204d76665cd8f90d85100f86a1 /block/blk-core.c
parent		608aeef17a91747d6303de4df5e2c2e6899a95e8 (diff)
block: unify request timeout handling
Right now SCSI and others do their own command timeout handling.
Move those bits to the block layer.
Instead of having a timer per command, we try to be a bit more clever
and simply have one per queue. This avoids the overhead of having to
tear down and set up a timer for each command, so it will result in a
lot less timer fiddling.
Signed-off-by: Mike Anderson <andmike@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
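
The arming half of that per-queue timer lives in blk-timeout.c, which is
outside this diffstat. As a rough sketch of the mechanism (an illustration,
not the verbatim patch; q->rq_timeout, rq->deadline and the exact shape of
blk_add_timer() are assumptions based on the series), a started request is
put on the queue's timeout_list, and the shared timer is only moved if the
new deadline is sooner than whatever it is already armed for:

/*
 * Illustrative sketch, not the code from this commit: arm the single
 * per-queue timer for a newly started request.  Field names beyond
 * those visible in the diff below are assumptions.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/* track this request on the queue-wide timeout list */
	req->deadline = jiffies + q->rq_timeout;
	list_add_tail(&req->timeout_list, &q->timeout_list);

	/*
	 * Only touch the timer when this deadline is earlier than what
	 * it is already set for - this is what saves the per-command
	 * timer setup/teardown the commit message refers to.
	 */
	expiry = round_jiffies(req->deadline);
	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires))
		mod_timer(&q->timeout, expiry);
}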
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	7 +++++++
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index f25eb9786d94..d768a8ddc173 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -110,6 +110,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	memset(rq, 0, sizeof(*rq));
 
 	INIT_LIST_HEAD(&rq->queuelist);
+	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->cpu = -1;
 	rq->q = q;
 	rq->sector = rq->hard_sector = (sector_t) -1;
@@ -490,6 +491,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	}
 
 	init_timer(&q->unplug_timer);
+	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+	INIT_LIST_HEAD(&q->timeout_list);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
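
The handler wired up above, blk_rq_timed_out_timer(), is added by the same
patch in blk-timeout.c, so its body does not appear in this file. In outline
(again a sketch under the same assumptions, with locking omitted; the real
version runs under q->queue_lock), it recovers the queue from the timer's
data argument, expires requests past their deadline, and re-arms itself once
for the soonest remaining one:

/*
 * Sketch of the queue-wide expiry handler; blk_rq_timed_out() is the
 * hand-off to the driver's timeout hook from the same series.
 */
static void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	struct request *rq, *tmp;
	unsigned long next = 0;
	int next_set = 0;

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
		if (time_after_eq(jiffies, rq->deadline)) {
			list_del_init(&rq->timeout_list);
			blk_rq_timed_out(rq);	/* let the driver decide */
		} else if (!next_set || time_before(rq->deadline, next)) {
			next = rq->deadline;
			next_set = 1;
		}
	}

	/* one mod_timer() for the next-soonest deadline, if any */
	if (next_set)
		mod_timer(&q->timeout, round_jiffies(next));
}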
@@ -897,6 +900,8 @@ EXPORT_SYMBOL(blk_start_queueing);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+	blk_delete_timer(rq);
+	blk_clear_rq_complete(rq);
 	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 
 	if (blk_rq_tagged(rq))
@@ -1650,6 +1655,8 @@ static void end_that_request_last(struct request *req, int error)
 {
 	struct gendisk *disk = req->rq_disk;
 
+	blk_delete_timer(req);
+
 	if (blk_rq_tagged(req))
 		blk_queue_end_tag(req->q, req);
 
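
For a driver, the visible half of this change is registering a timeout
handler with the queue instead of running its own command timers. A hedged
example of that hookup (blk_queue_rq_timed_out(), blk_queue_rq_timeout() and
enum blk_eh_timer_return come from the companion blkdev.h/blk-settings.c
changes in this series, not from this file; my_driver_timed_out() and
my_driver_can_retry() are hypothetical):

/* driver-side sketch: let the block layer own the command timer */
static enum blk_eh_timer_return my_driver_timed_out(struct request *rq)
{
	if (my_driver_can_retry(rq))	/* hypothetical driver helper */
		return BLK_EH_RESET_TIMER;	/* give the command more time */

	return BLK_EH_NOT_HANDLED;	/* escalate to generic recovery */
}

	/* during queue setup */
	blk_queue_rq_timed_out(q, my_driver_timed_out);
	blk_queue_rq_timeout(q, 30 * HZ);	/* default per-request timeout */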