Diffstat (limited to 'block/blk-timeout.c')
-rw-r--r--	block/blk-timeout.c	155
1 files changed, 155 insertions, 0 deletions
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
new file mode 100644
index 000000000000..b36d07bf0afb
--- /dev/null
+++ b/block/blk-timeout.c
@@ -0,0 +1,155 @@
/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>

#include "blk.h"

/*
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req: request that we are canceling the timer for
 */
void blk_delete_timer(struct request *req)
{
	struct request_queue *q = req->q;

	/*
	 * Nothing to detach
	 */
	if (!q->rq_timed_out_fn || !req->deadline)
		return;

	list_del_init(&req->timeout_list);

	if (list_empty(&q->timeout_list))
		del_timer(&q->timeout);
}

static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret;

	ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_clear_rq_complete(req);
		blk_add_timer(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now, but in the future we
		 * could send a request message to abort the command,
		 * and move more of the generic SCSI EH code into the
		 * block layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	unsigned long flags, uninitialized_var(next), next_set = 0;
	struct request *rq, *tmp;

	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
		if (time_after_eq(jiffies, rq->deadline)) {
			list_del_init(&rq->timeout_list);

			/*
			 * Check if we raced with end io completion
			 */
			if (blk_mark_rq_complete(rq))
				continue;
			blk_rq_timed_out(rq);
		}
		if (!next_set) {
			next = rq->deadline;
			next_set = 1;
		} else if (time_after(next, rq->deadline))
			next = rq->deadline;
	}

	if (next_set && !list_empty(&q->timeout_list))
		mod_timer(&q->timeout, round_jiffies(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * blk_abort_request - Request recovery for the specified request
 * @req: pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the queue's timeout function.
 * LLDs that implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request(). Must hold queue lock.
 */
void blk_abort_request(struct request *req)
{
	blk_delete_timer(req);
	blk_rq_timed_out(req);
}
EXPORT_SYMBOL_GPL(blk_abort_request);

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req: request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	if (!q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));
	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));

	if (req->timeout)
		req->deadline = jiffies + req->timeout;
	else {
		req->deadline = jiffies + q->rq_timeout;
		/*
		 * Some LLDs, like scsi, peek at the timeout to prevent
		 * a command from being retried forever.
		 */
		req->timeout = q->rq_timeout;
	}
	list_add_tail(&req->timeout_list, &q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round to next nearest
	 * second.
	 */
	expiry = round_jiffies(req->deadline);

	/*
	 * We use ->deadline == 0 to detect whether a timer was added or
	 * not, so just increase to next jiffy for that specific case
	 */
	if (unlikely(!req->deadline))
		req->deadline = 1;

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires))
		mod_timer(&q->timeout, expiry);
}
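
For context, here is a minimal sketch of how a low-level driver might hook into this machinery, assuming the companion blk_queue_rq_timed_out() and blk_queue_rq_timeout() setters from the same patch series; the mydrv_* names and helper functions are hypothetical, not part of this patch.

	#include <linux/blkdev.h>

	struct mydrv_device;		/* hypothetical driver state */
	extern bool mydrv_request_finished(struct mydrv_device *, struct request *);
	extern bool mydrv_still_working(struct mydrv_device *, struct request *);
	extern void mydrv_schedule_eh(struct mydrv_device *);

	/*
	 * Hypothetical timeout handler.  blk_rq_timed_out() above calls
	 * this once a request's deadline has passed and the completion
	 * race was lost.
	 */
	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
	{
		struct mydrv_device *dev = rq->q->queuedata;

		if (mydrv_request_finished(dev, rq))
			/* Hardware completed it after all: finish the request. */
			return BLK_EH_HANDLED;

		if (mydrv_still_working(dev, rq))
			/* Still in flight: re-arm via blk_add_timer(). */
			return BLK_EH_RESET_TIMER;

		/* Leave it to the driver's own error handler. */
		mydrv_schedule_eh(dev);
		return BLK_EH_NOT_HANDLED;
	}

	/* Wiring it up at queue setup time: */
	static void mydrv_setup_queue(struct request_queue *q,
				      struct mydrv_device *dev)
	{
		q->queuedata = dev;
		blk_queue_rq_timed_out(q, mydrv_timed_out);
		blk_queue_rq_timeout(q, 30 * HZ);	/* default per-request timeout */
	}

The three return values map onto the switch in blk_rq_timed_out() above: BLK_EH_HANDLED completes the request via __blk_complete_request(), BLK_EH_RESET_TIMER clears the complete flag and re-adds the timer, and BLK_EH_NOT_HANDLED leaves the request to the driver.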
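A similarly hypothetical abort path, illustrating the locking contract stated in the blk_abort_request() kernel-doc above:

	/*
	 * Illustrative only: blk_abort_request() must be called with the
	 * queue lock held, per its documentation.
	 */
	static void mydrv_abort_request(struct request *rq)
	{
		struct request_queue *q = rq->q;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_abort_request(rq);	/* cancels the timer, runs the timeout fn */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}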