Diffstat (limited to 'block/blk-tag.c')
 -rw-r--r--  block/blk-tag.c | 396
 1 file changed, 396 insertions, 0 deletions
diff --git a/block/blk-tag.c b/block/blk-tag.c
new file mode 100644
index 000000000000..d1fd300e8aea
--- /dev/null
+++ b/block/blk-tag.c
@@ -0,0 +1,396 @@
/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q: The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}

EXPORT_SYMBOL(blk_queue_find_tag);
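
/*
 * Editorial usage sketch (not part of the original file): how a completion
 * path might use blk_queue_find_tag() to map a tag reported by the hardware
 * back to its request.  struct my_dev, my_read_completed_tag() and
 * my_finish_request() are assumed, hypothetical driver names.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		int tag = my_read_completed_tag(dev);	// hypothetical helper
 *		struct request *rq;
 *
 *		rq = blk_queue_find_tag(dev->queue, tag);
 *		if (!rq)
 *			return IRQ_NONE;	// stale or unknown tag
 *
 *		my_finish_request(dev, rq);	// hypothetical helper
 *		return IRQ_HANDLED;
 *	}
 */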

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(bqt->busy);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * For an externally managed @bqt, frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 *	This is used to disable tagged queuing on a device, yet leave the
 *	queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}

EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __FUNCTION__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	tags->busy = 0;
	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth: the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
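
/*
 * Editorial usage sketch (not part of the original file): sharing one
 * externally managed tag map across several queues, which is what
 * blk_init_tags()/blk_free_tags() exist for.  The host structure, its
 * fields and the error path are assumptions for illustration.
 *
 *	struct blk_queue_tag *shared;
 *	int i;
 *
 *	shared = blk_init_tags(64);
 *	if (!shared)
 *		return -ENOMEM;
 *
 *	for (i = 0; i < host->nr_queues; i++)
 *		if (blk_queue_init_tags(host->queue[i], 64, shared))
 *			goto fail;
 *
 *	// ... later, once every queue using the map has been released:
 *	blk_free_tags(shared);
 */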

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q: the request queue for the device
 * @depth: the maximum queue depth supported
 * @tags: the existing tag map to use, or NULL to allocate a new one
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		if ((rc = blk_queue_resize_tags(q, depth)))
			return rc;
		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}

EXPORT_SYMBOL(blk_queue_init_tags);
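
/*
 * Editorial usage sketch (not part of the original file): enabling tagged
 * queuing on a single queue, e.g. from a driver probe path.  Passing NULL
 * for @tags makes the function allocate a private map; dev->queue and
 * dev->can_queue are assumed driver fields.
 *
 *	if (blk_queue_init_tags(dev->queue, dev->can_queue, NULL))
 *		printk(KERN_WARNING "mydrv: tagged queuing not enabled\n");
 */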

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q: the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just
	 * adjust max_depth.  *NOTE* as requests with tag values
	 * between new_depth and real_max_depth can be in-flight, the
	 * tag map cannot be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}

EXPORT_SYMBOL(blk_queue_resize_tags);
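
/*
 * Editorial usage sketch (not part of the original file): resizing the tag
 * depth after the device reports a new queue depth.  As noted above the
 * queue lock must be held; q, new_depth and flags come from an assumed
 * driver context.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	if (blk_queue_resize_tags(q, new_depth))
 *		printk(KERN_WARNING "mydrv: failed to resize tag map\n");
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */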

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q: the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Typically called when end_that_request_first() returns 0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __FUNCTION__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __FUNCTION__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
	bqt->busy--;
}

EXPORT_SYMBOL(blk_queue_end_tag);
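
/*
 * Editorial usage sketch (not part of the original file): the completion
 * ordering described above, using the end_that_request_first()/
 * end_that_request_last() helpers named in the kernel-doc comment.  Error
 * handling is simplified; uptodate and nr_sectors come from an assumed
 * driver context.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
 *		blk_queue_end_tag(q, rq);	// release the tag first
 *		end_that_request_last(rq, uptodate);
 *	}
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */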

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q: the request queue for the device
 * @rq: the block request that needs tagging
 *
 * Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function. The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 * Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d\n",
		       __FUNCTION__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 */
	do {
		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
		if (tag >= bqt->max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	bqt->busy++;
	return 0;
}

EXPORT_SYMBOL(blk_queue_start_tag);
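
/*
 * Editorial usage sketch (not part of the original file): a request_fn that
 * tags each command before handing it to the hardware.  It relies on
 * elv_next_request(), the request-fetch interface of this era's block
 * layer; my_send_to_hw() is an assumed driver hook.  The request_fn is
 * entered with the queue lock held.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (blk_queue_start_tag(q, rq))
 *				break;	// out of tags, retry on completion
 *			my_send_to_hw(q->queuedata, rq);
 *		}
 *	}
 */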

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q: the request queue for the device
 *
 * Description:
 *    Hardware conditions may dictate a need to stop all pending requests.
 *    In this case, we will safely clear the block side of the tag queue and
 *    re-add all requests to the request queue in the right order.
 *
 * Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}

EXPORT_SYMBOL(blk_queue_invalidate_tags);
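
/*
 * Editorial usage sketch (not part of the original file): requeueing
 * everything the hardware had in flight, e.g. from a hypothetical
 * controller-reset path.  As noted above, the queue lock must be held;
 * my_reset_controller() and dev are assumed driver names.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_invalidate_tags(q);
 *	my_reset_controller(dev);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */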