author		Paul Mackerras <paulus@samba.org>	2005-11-07 19:14:20 -0500
committer	Paul Mackerras <paulus@samba.org>	2005-11-07 19:14:20 -0500
commit		24bfb00123e82a2e70bd115277d922438813515b (patch)
tree		27328b8a5718e16d64e2d101f4b7ddcad5930aed /block/ll_rw_blk.c
parent		c6135234550ed89a6fd0e8cb229633967e41d649 (diff)
parent		3f00d3e8fb963968a922d821a9a53b503b687e81 (diff)

Merge ../linux-2.6

Diffstat (limited to 'block/ll_rw_blk.c')

 -rw-r--r--	block/ll_rw_blk.c	3612

1 files changed, 3612 insertions, 0 deletions

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
new file mode 100644
index 000000000000..5f52e30b43f8
--- /dev/null
+++ b/block/ll_rw_blk.c
@@ -0,0 +1,3612 @@
1 | /* | ||
2 | * linux/drivers/block/ll_rw_blk.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | * Copyright (C) 1994, Karl Keyte: Added support for disk statistics | ||
6 | * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE | ||
7 | * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> | ||
8 | * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July 2000 | ||
9 | * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001 | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * This handles all read/write requests to block devices | ||
14 | */ | ||
15 | #include <linux/config.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/backing-dev.h> | ||
19 | #include <linux/bio.h> | ||
20 | #include <linux/blkdev.h> | ||
21 | #include <linux/highmem.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/kernel_stat.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | ||
27 | #include <linux/completion.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/swap.h> | ||
30 | #include <linux/writeback.h> | ||
31 | #include <linux/blkdev.h> | ||
32 | |||
33 | /* | ||
34 | * for max sense size | ||
35 | */ | ||
36 | #include <scsi/scsi_cmnd.h> | ||
37 | |||
38 | static void blk_unplug_work(void *data); | ||
39 | static void blk_unplug_timeout(unsigned long data); | ||
40 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); | ||
41 | |||
42 | /* | ||
43 | * For the allocated request tables | ||
44 | */ | ||
45 | static kmem_cache_t *request_cachep; | ||
46 | |||
47 | /* | ||
48 | * For queue allocation | ||
49 | */ | ||
50 | static kmem_cache_t *requestq_cachep; | ||
51 | |||
52 | /* | ||
53 | * For io context allocations | ||
54 | */ | ||
55 | static kmem_cache_t *iocontext_cachep; | ||
56 | |||
57 | static wait_queue_head_t congestion_wqh[2] = { | ||
58 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), | ||
59 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) | ||
60 | }; | ||
61 | |||
62 | /* | ||
63 | * Controlling structure to kblockd | ||
64 | */ | ||
65 | static struct workqueue_struct *kblockd_workqueue; | ||
66 | |||
67 | unsigned long blk_max_low_pfn, blk_max_pfn; | ||
68 | |||
69 | EXPORT_SYMBOL(blk_max_low_pfn); | ||
70 | EXPORT_SYMBOL(blk_max_pfn); | ||
71 | |||
72 | /* Amount of time in which a process may batch requests */ | ||
73 | #define BLK_BATCH_TIME (HZ/50UL) | ||
74 | |||
75 | /* Number of requests a "batching" process may submit */ | ||
76 | #define BLK_BATCH_REQ 32 | ||
77 | |||
78 | /* | ||
79 | * Return the threshold (number of used requests) at which the queue is | ||
80 | * considered to be congested. It includes a little hysteresis to keep the | ||
81 | * context switch rate down. | ||
82 | */ | ||
83 | static inline int queue_congestion_on_threshold(struct request_queue *q) | ||
84 | { | ||
85 | return q->nr_congestion_on; | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * The threshold at which a queue is considered to be uncongested | ||
90 | */ | ||
91 | static inline int queue_congestion_off_threshold(struct request_queue *q) | ||
92 | { | ||
93 | return q->nr_congestion_off; | ||
94 | } | ||
95 | |||
96 | static void blk_queue_congestion_threshold(struct request_queue *q) | ||
97 | { | ||
98 | int nr; | ||
99 | |||
100 | nr = q->nr_requests - (q->nr_requests / 8) + 1; | ||
101 | if (nr > q->nr_requests) | ||
102 | nr = q->nr_requests; | ||
103 | q->nr_congestion_on = nr; | ||
104 | |||
105 | nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; | ||
106 | if (nr < 1) | ||
107 | nr = 1; | ||
108 | q->nr_congestion_off = nr; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * A queue has just exited congestion. Note this in the global counter of | ||
113 | * congested queues, and wake up anyone who was waiting for requests to be | ||
114 | * put back. | ||
115 | */ | ||
116 | static void clear_queue_congested(request_queue_t *q, int rw) | ||
117 | { | ||
118 | enum bdi_state bit; | ||
119 | wait_queue_head_t *wqh = &congestion_wqh[rw]; | ||
120 | |||
121 | bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested; | ||
122 | clear_bit(bit, &q->backing_dev_info.state); | ||
123 | smp_mb__after_clear_bit(); | ||
124 | if (waitqueue_active(wqh)) | ||
125 | wake_up(wqh); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * A queue has just entered congestion. Flag that in the queue's VM-visible | ||
130 | * state flags and increment the global counter of congested queues. | ||
131 | */ | ||
132 | static void set_queue_congested(request_queue_t *q, int rw) | ||
133 | { | ||
134 | enum bdi_state bit; | ||
135 | |||
136 | bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested; | ||
137 | set_bit(bit, &q->backing_dev_info.state); | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * blk_get_backing_dev_info - get the address of a queue's backing_dev_info | ||
142 | * @bdev: device | ||
143 | * | ||
144 | * Locates the passed device's request queue and returns the address of its | ||
145 | * backing_dev_info | ||
146 | * | ||
147 | * Will return NULL if the request queue cannot be located. | ||
148 | */ | ||
149 | struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) | ||
150 | { | ||
151 | struct backing_dev_info *ret = NULL; | ||
152 | request_queue_t *q = bdev_get_queue(bdev); | ||
153 | |||
154 | if (q) | ||
155 | ret = &q->backing_dev_info; | ||
156 | return ret; | ||
157 | } | ||
158 | |||
159 | EXPORT_SYMBOL(blk_get_backing_dev_info); | ||
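/*
 * Illustrative sketch, not part of the original file: how a caller might
 * use blk_get_backing_dev_info(). "bdev" is an assumed, already-opened
 * block device; the NULL check matters because the queue may not exist.
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *	if (bdi)
 *		printk("readahead window: %lu pages\n", bdi->ra_pages);
 */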
160 | |||
161 | void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data) | ||
162 | { | ||
163 | q->activity_fn = fn; | ||
164 | q->activity_data = data; | ||
165 | } | ||
166 | |||
167 | EXPORT_SYMBOL(blk_queue_activity_fn); | ||
168 | |||
169 | /** | ||
170 | * blk_queue_prep_rq - set a prepare_request function for queue | ||
171 | * @q: queue | ||
172 | * @pfn: prepare_request function | ||
173 | * | ||
174 | * It's possible for a queue to register a prepare_request callback which | ||
175 | * is invoked before the request is handed to the request_fn. The goal of | ||
176 | * the function is to prepare a request for I/O, it can be used to build a | ||
177 | * cdb from the request data for instance. | ||
178 | * | ||
179 | */ | ||
180 | void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn) | ||
181 | { | ||
182 | q->prep_rq_fn = pfn; | ||
183 | } | ||
184 | |||
185 | EXPORT_SYMBOL(blk_queue_prep_rq); | ||
186 | |||
187 | /** | ||
188 | * blk_queue_merge_bvec - set a merge_bvec function for queue | ||
189 | * @q: queue | ||
190 | * @mbfn: merge_bvec_fn | ||
191 | * | ||
192 | * Usually queues have static limitations on the max sectors or segments that | ||
193 | * we can put in a request. Stacking drivers may have some settings that | ||
194 | * are dynamic, and thus we have to query the queue whether it is ok to | ||
195 | * add a new bio_vec to a bio at a given offset or not. If the block device | ||
196 | * has such limitations, it needs to register a merge_bvec_fn to control | ||
197 | * the size of bio's sent to it. Note that a block device *must* allow a | ||
198 | * single page to be added to an empty bio. The block device driver may want | ||
199 | * to use the bio_split() function to deal with these bio's. By default | ||
200 | * no merge_bvec_fn is defined for a queue, and only the fixed limits are | ||
201 | * honored. | ||
202 | */ | ||
203 | void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn) | ||
204 | { | ||
205 | q->merge_bvec_fn = mbfn; | ||
206 | } | ||
207 | |||
208 | EXPORT_SYMBOL(blk_queue_merge_bvec); | ||
209 | |||
210 | /** | ||
211 | * blk_queue_make_request - define an alternate make_request function for a device | ||
212 | * @q: the request queue for the device to be affected | ||
213 | * @mfn: the alternate make_request function | ||
214 | * | ||
215 | * Description: | ||
216 | * The normal way for &struct bios to be passed to a device | ||
217 | * driver is for them to be collected into requests on a request | ||
218 | * queue, and then to allow the device driver to select requests | ||
219 | * off that queue when it is ready. This works well for many block | ||
220 | * devices. However some block devices (typically virtual devices | ||
221 | * such as md or lvm) do not benefit from the processing on the | ||
222 | * request queue, and are served best by having the requests passed | ||
223 | * directly to them. This can be achieved by providing a function | ||
224 | * to blk_queue_make_request(). | ||
225 | * | ||
226 | * Caveat: | ||
227 | * The driver that does this *must* be able to deal appropriately | ||
228 | * with buffers in "highmemory". This can be accomplished by either calling | ||
229 | * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling | ||
230 | * blk_queue_bounce() to create a buffer in normal memory. | ||
231 | **/ | ||
232 | void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) | ||
233 | { | ||
234 | /* | ||
235 | * set defaults | ||
236 | */ | ||
237 | q->nr_requests = BLKDEV_MAX_RQ; | ||
238 | blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); | ||
239 | blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); | ||
240 | q->make_request_fn = mfn; | ||
241 | q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | ||
242 | q->backing_dev_info.state = 0; | ||
243 | q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; | ||
244 | blk_queue_max_sectors(q, MAX_SECTORS); | ||
245 | blk_queue_hardsect_size(q, 512); | ||
246 | blk_queue_dma_alignment(q, 511); | ||
247 | blk_queue_congestion_threshold(q); | ||
248 | q->nr_batching = BLK_BATCH_REQ; | ||
249 | |||
250 | q->unplug_thresh = 4; /* hmm */ | ||
251 | q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */ | ||
252 | if (q->unplug_delay == 0) | ||
253 | q->unplug_delay = 1; | ||
254 | |||
255 | INIT_WORK(&q->unplug_work, blk_unplug_work, q); | ||
256 | |||
257 | q->unplug_timer.function = blk_unplug_timeout; | ||
258 | q->unplug_timer.data = (unsigned long)q; | ||
259 | |||
260 | /* | ||
261 | * by default assume old behaviour and bounce for any highmem page | ||
262 | */ | ||
263 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | ||
264 | |||
265 | blk_queue_activity_fn(q, NULL, NULL); | ||
266 | } | ||
267 | |||
268 | EXPORT_SYMBOL(blk_queue_make_request); | ||
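/*
 * Illustrative sketch with assumed names (mydev_make_request, mydev): a
 * virtual driver bypassing the request queue as described above. The
 * callback receives each bio directly instead of letting it be queued.
 *
 *	static int mydev_make_request(request_queue_t *q, struct bio *bio)
 *	{
 *		... remap or service the bio, then end it with bio_endio() ...
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, mydev_make_request);
 */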
269 | |||
270 | static inline void rq_init(request_queue_t *q, struct request *rq) | ||
271 | { | ||
272 | INIT_LIST_HEAD(&rq->queuelist); | ||
273 | |||
274 | rq->errors = 0; | ||
275 | rq->rq_status = RQ_ACTIVE; | ||
276 | rq->bio = rq->biotail = NULL; | ||
277 | rq->ioprio = 0; | ||
278 | rq->buffer = NULL; | ||
279 | rq->ref_count = 1; | ||
280 | rq->q = q; | ||
281 | rq->waiting = NULL; | ||
282 | rq->special = NULL; | ||
283 | rq->data_len = 0; | ||
284 | rq->data = NULL; | ||
285 | rq->nr_phys_segments = 0; | ||
286 | rq->sense = NULL; | ||
287 | rq->end_io = NULL; | ||
288 | rq->end_io_data = NULL; | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * blk_queue_ordered - does this queue support ordered writes | ||
293 | * @q: the request queue | ||
294 | * @flag: see below | ||
295 | * | ||
296 | * Description: | ||
297 | * For journalled file systems, doing ordered writes on a commit | ||
298 | * block instead of explicitly doing wait_on_buffer (which is bad | ||
299 | * for performance) can be a big win. Block drivers supporting this | ||
300 | * feature should call this function and indicate so. | ||
301 | * | ||
302 | **/ | ||
303 | void blk_queue_ordered(request_queue_t *q, int flag) | ||
304 | { | ||
305 | switch (flag) { | ||
306 | case QUEUE_ORDERED_NONE: | ||
307 | if (q->flush_rq) | ||
308 | kmem_cache_free(request_cachep, q->flush_rq); | ||
309 | q->flush_rq = NULL; | ||
310 | q->ordered = flag; | ||
311 | break; | ||
312 | case QUEUE_ORDERED_TAG: | ||
313 | q->ordered = flag; | ||
314 | break; | ||
315 | case QUEUE_ORDERED_FLUSH: | ||
316 | q->ordered = flag; | ||
317 | if (!q->flush_rq) | ||
318 | q->flush_rq = kmem_cache_alloc(request_cachep, | ||
319 | GFP_KERNEL); | ||
320 | break; | ||
321 | default: | ||
322 | printk("blk_queue_ordered: bad value %d\n", flag); | ||
323 | break; | ||
324 | } | ||
325 | } | ||
326 | |||
327 | EXPORT_SYMBOL(blk_queue_ordered); | ||
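/*
 * Illustrative sketch, assumed driver callbacks (mydrv_prepare_flush,
 * mydrv_end_flush): a driver advertising flush-based ordered writes.
 * QUEUE_ORDERED_FLUSH makes this file allocate q->flush_rq and drive the
 * pre/post flush sequence around barrier requests.
 *
 *	q->prepare_flush_fn = mydrv_prepare_flush;
 *	q->end_flush_fn = mydrv_end_flush;
 *	blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
 */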
328 | |||
329 | /** | ||
330 | * blk_queue_issue_flush_fn - set function for issuing a flush | ||
331 | * @q: the request queue | ||
332 | * @iff: the function to be called issuing the flush | ||
333 | * | ||
334 | * Description: | ||
335 | * If a driver supports issuing a flush command, it notifies the block | ||
336 | * layer of that support by registering the function through this call. | ||
337 | * | ||
338 | **/ | ||
339 | void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff) | ||
340 | { | ||
341 | q->issue_flush_fn = iff; | ||
342 | } | ||
343 | |||
344 | EXPORT_SYMBOL(blk_queue_issue_flush_fn); | ||
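/*
 * Illustrative sketch, assumed callback name: registering a driver
 * function that knows how to send a cache flush to the device, so the
 * block layer can issue one when asked to.
 *
 *	blk_queue_issue_flush_fn(q, mydrv_issue_flush);
 */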
345 | |||
346 | /* | ||
347 | * Cache flushing for ordered writes handling | ||
348 | */ | ||
349 | static void blk_pre_flush_end_io(struct request *flush_rq) | ||
350 | { | ||
351 | struct request *rq = flush_rq->end_io_data; | ||
352 | request_queue_t *q = rq->q; | ||
353 | |||
354 | elv_completed_request(q, flush_rq); | ||
355 | |||
356 | rq->flags |= REQ_BAR_PREFLUSH; | ||
357 | |||
358 | if (!flush_rq->errors) | ||
359 | elv_requeue_request(q, rq); | ||
360 | else { | ||
361 | q->end_flush_fn(q, flush_rq); | ||
362 | clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags); | ||
363 | q->request_fn(q); | ||
364 | } | ||
365 | } | ||
366 | |||
367 | static void blk_post_flush_end_io(struct request *flush_rq) | ||
368 | { | ||
369 | struct request *rq = flush_rq->end_io_data; | ||
370 | request_queue_t *q = rq->q; | ||
371 | |||
372 | elv_completed_request(q, flush_rq); | ||
373 | |||
374 | rq->flags |= REQ_BAR_POSTFLUSH; | ||
375 | |||
376 | q->end_flush_fn(q, flush_rq); | ||
377 | clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags); | ||
378 | q->request_fn(q); | ||
379 | } | ||
380 | |||
381 | struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq) | ||
382 | { | ||
383 | struct request *flush_rq = q->flush_rq; | ||
384 | |||
385 | BUG_ON(!blk_barrier_rq(rq)); | ||
386 | |||
387 | if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags)) | ||
388 | return NULL; | ||
389 | |||
390 | rq_init(q, flush_rq); | ||
391 | flush_rq->elevator_private = NULL; | ||
392 | flush_rq->flags = REQ_BAR_FLUSH; | ||
393 | flush_rq->rq_disk = rq->rq_disk; | ||
394 | flush_rq->rl = NULL; | ||
395 | |||
396 | /* | ||
397 | * prepare_flush returns 0 if no flush is needed, just mark both | ||
398 | * pre and post flush as done in that case | ||
399 | */ | ||
400 | if (!q->prepare_flush_fn(q, flush_rq)) { | ||
401 | rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH; | ||
402 | clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags); | ||
403 | return rq; | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * some drivers dequeue requests right away, some only after io | ||
408 | * completion. make sure the request is dequeued. | ||
409 | */ | ||
410 | if (!list_empty(&rq->queuelist)) | ||
411 | blkdev_dequeue_request(rq); | ||
412 | |||
413 | flush_rq->end_io_data = rq; | ||
414 | flush_rq->end_io = blk_pre_flush_end_io; | ||
415 | |||
416 | __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0); | ||
417 | return flush_rq; | ||
418 | } | ||
419 | |||
420 | static void blk_start_post_flush(request_queue_t *q, struct request *rq) | ||
421 | { | ||
422 | struct request *flush_rq = q->flush_rq; | ||
423 | |||
424 | BUG_ON(!blk_barrier_rq(rq)); | ||
425 | |||
426 | rq_init(q, flush_rq); | ||
427 | flush_rq->elevator_private = NULL; | ||
428 | flush_rq->flags = REQ_BAR_FLUSH; | ||
429 | flush_rq->rq_disk = rq->rq_disk; | ||
430 | flush_rq->rl = NULL; | ||
431 | |||
432 | if (q->prepare_flush_fn(q, flush_rq)) { | ||
433 | flush_rq->end_io_data = rq; | ||
434 | flush_rq->end_io = blk_post_flush_end_io; | ||
435 | |||
436 | __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0); | ||
437 | q->request_fn(q); | ||
438 | } | ||
439 | } | ||
440 | |||
441 | static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq, | ||
442 | int sectors) | ||
443 | { | ||
444 | if (sectors > rq->nr_sectors) | ||
445 | sectors = rq->nr_sectors; | ||
446 | |||
447 | rq->nr_sectors -= sectors; | ||
448 | return rq->nr_sectors; | ||
449 | } | ||
450 | |||
451 | static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq, | ||
452 | int sectors, int queue_locked) | ||
453 | { | ||
454 | if (q->ordered != QUEUE_ORDERED_FLUSH) | ||
455 | return 0; | ||
456 | if (!blk_fs_request(rq) || !blk_barrier_rq(rq)) | ||
457 | return 0; | ||
458 | if (blk_barrier_postflush(rq)) | ||
459 | return 0; | ||
460 | |||
461 | if (!blk_check_end_barrier(q, rq, sectors)) { | ||
462 | unsigned long flags = 0; | ||
463 | |||
464 | if (!queue_locked) | ||
465 | spin_lock_irqsave(q->queue_lock, flags); | ||
466 | |||
467 | blk_start_post_flush(q, rq); | ||
468 | |||
469 | if (!queue_locked) | ||
470 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
471 | } | ||
472 | |||
473 | return 1; | ||
474 | } | ||
475 | |||
476 | /** | ||
477 | * blk_complete_barrier_rq - complete possible barrier request | ||
478 | * @q: the request queue for the device | ||
479 | * @rq: the request | ||
480 | * @sectors: number of sectors to complete | ||
481 | * | ||
482 | * Description: | ||
483 | * Used in driver end_io handling to determine whether to postpone | ||
484 | * completion of a barrier request until a post flush has been done. This | ||
485 | * is the unlocked variant, used if the caller doesn't already hold the | ||
486 | * queue lock. | ||
487 | **/ | ||
488 | int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors) | ||
489 | { | ||
490 | return __blk_complete_barrier_rq(q, rq, sectors, 0); | ||
491 | } | ||
492 | EXPORT_SYMBOL(blk_complete_barrier_rq); | ||
493 | |||
494 | /** | ||
495 | * blk_complete_barrier_rq_locked - complete possible barrier request | ||
496 | * @q: the request queue for the device | ||
497 | * @rq: the request | ||
498 | * @sectors: number of sectors to complete | ||
499 | * | ||
500 | * Description: | ||
501 | * See blk_complete_barrier_rq(). This variant must be used if the caller | ||
502 | * holds the queue lock. | ||
503 | **/ | ||
504 | int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq, | ||
505 | int sectors) | ||
506 | { | ||
507 | return __blk_complete_barrier_rq(q, rq, sectors, 1); | ||
508 | } | ||
509 | EXPORT_SYMBOL(blk_complete_barrier_rq_locked); | ||
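/*
 * Illustrative sketch: typical use from a driver's completion handler,
 * where nr_sectors is whatever the driver just transferred. A non-zero
 * return means final completion is deferred to the post flush machinery;
 * zero means the request completes as usual.
 *
 *	if (!blk_complete_barrier_rq(q, rq, nr_sectors))
 *		end_that_request_last(rq);
 */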
510 | |||
511 | /** | ||
512 | * blk_queue_bounce_limit - set bounce buffer limit for queue | ||
513 | * @q: the request queue for the device | ||
514 | * @dma_addr: bus address limit | ||
515 | * | ||
516 | * Description: | ||
517 | * Different hardware can have different requirements as to what pages | ||
518 | * it can do I/O directly to. A low level driver can call | ||
519 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce | ||
520 | * buffers for doing I/O to pages residing above @dma_addr. By default | ||
521 | * the block layer sets this to the highest numbered "low" memory page. | ||
522 | **/ | ||
523 | void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) | ||
524 | { | ||
525 | unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; | ||
526 | |||
527 | /* | ||
528 | * set appropriate bounce gfp mask -- unfortunately we don't have a | ||
529 | * full 4GB zone, so we have to resort to low memory for any bounces. | ||
530 | * ISA has its own < 16MB zone. | ||
531 | */ | ||
532 | if (bounce_pfn < blk_max_low_pfn) { | ||
533 | BUG_ON(dma_addr < BLK_BOUNCE_ISA); | ||
534 | init_emergency_isa_pool(); | ||
535 | q->bounce_gfp = GFP_NOIO | GFP_DMA; | ||
536 | } else | ||
537 | q->bounce_gfp = GFP_NOIO; | ||
538 | |||
539 | q->bounce_pfn = bounce_pfn; | ||
540 | } | ||
541 | |||
542 | EXPORT_SYMBOL(blk_queue_bounce_limit); | ||
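/*
 * Illustrative sketch: a controller that can DMA anywhere disables
 * bouncing entirely, while an ISA-style controller restricts I/O to the
 * low 16MB. BLK_BOUNCE_ANY and BLK_BOUNCE_ISA are the standard limits.
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);	full 64-bit DMA
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);	24-bit ISA DMA only
 */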
543 | |||
544 | /** | ||
545 | * blk_queue_max_sectors - set max sectors for a request for this queue | ||
546 | * @q: the request queue for the device | ||
547 | * @max_sectors: max sectors in the usual 512b unit | ||
548 | * | ||
549 | * Description: | ||
550 | * Enables a low level driver to set an upper limit on the size of | ||
551 | * received requests. | ||
552 | **/ | ||
553 | void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors) | ||
554 | { | ||
555 | if ((max_sectors << 9) < PAGE_CACHE_SIZE) { | ||
556 | max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | ||
557 | printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors); | ||
558 | } | ||
559 | |||
560 | q->max_sectors = q->max_hw_sectors = max_sectors; | ||
561 | } | ||
562 | |||
563 | EXPORT_SYMBOL(blk_queue_max_sectors); | ||
564 | |||
565 | /** | ||
566 | * blk_queue_max_phys_segments - set max phys segments for a request for this queue | ||
567 | * @q: the request queue for the device | ||
568 | * @max_segments: max number of segments | ||
569 | * | ||
570 | * Description: | ||
571 | * Enables a low level driver to set an upper limit on the number of | ||
572 | * physical data segments in a request. This would be the largest sized | ||
573 | * scatter list the driver could handle. | ||
574 | **/ | ||
575 | void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments) | ||
576 | { | ||
577 | if (!max_segments) { | ||
578 | max_segments = 1; | ||
579 | printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); | ||
580 | } | ||
581 | |||
582 | q->max_phys_segments = max_segments; | ||
583 | } | ||
584 | |||
585 | EXPORT_SYMBOL(blk_queue_max_phys_segments); | ||
586 | |||
587 | /** | ||
588 | * blk_queue_max_hw_segments - set max hw segments for a request for this queue | ||
589 | * @q: the request queue for the device | ||
590 | * @max_segments: max number of segments | ||
591 | * | ||
592 | * Description: | ||
593 | * Enables a low level driver to set an upper limit on the number of | ||
594 | * hw data segments in a request. This would be the largest number of | ||
595 | * address/length pairs the host adapter can actually give at once | ||
596 | * to the device. | ||
597 | **/ | ||
598 | void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments) | ||
599 | { | ||
600 | if (!max_segments) { | ||
601 | max_segments = 1; | ||
602 | printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); | ||
603 | } | ||
604 | |||
605 | q->max_hw_segments = max_segments; | ||
606 | } | ||
607 | |||
608 | EXPORT_SYMBOL(blk_queue_max_hw_segments); | ||
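/*
 * Illustrative sketch, values made up: a low level driver sizing the
 * queue to what its DMA engine can take before accepting requests.
 * 256 sectors is 128KB per request with 512-byte sectors.
 *
 *	blk_queue_max_sectors(q, 256);
 *	blk_queue_max_phys_segments(q, 64);
 *	blk_queue_max_hw_segments(q, 64);
 */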
609 | |||
610 | /** | ||
611 | * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg | ||
612 | * @q: the request queue for the device | ||
613 | * @max_size: max size of segment in bytes | ||
614 | * | ||
615 | * Description: | ||
616 | * Enables a low level driver to set an upper limit on the size of a | ||
617 | * coalesced segment | ||
618 | **/ | ||
619 | void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size) | ||
620 | { | ||
621 | if (max_size < PAGE_CACHE_SIZE) { | ||
622 | max_size = PAGE_CACHE_SIZE; | ||
623 | printk("%s: set to minimum %d\n", __FUNCTION__, max_size); | ||
624 | } | ||
625 | |||
626 | q->max_segment_size = max_size; | ||
627 | } | ||
628 | |||
629 | EXPORT_SYMBOL(blk_queue_max_segment_size); | ||
630 | |||
631 | /** | ||
632 | * blk_queue_hardsect_size - set hardware sector size for the queue | ||
633 | * @q: the request queue for the device | ||
634 | * @size: the hardware sector size, in bytes | ||
635 | * | ||
636 | * Description: | ||
637 | * This should typically be set to the lowest possible sector size | ||
638 | * that the hardware can operate on without resorting to internal | ||
639 | * read-modify-write operations. Usually the default | ||
640 | * of 512 covers most hardware. | ||
641 | **/ | ||
642 | void blk_queue_hardsect_size(request_queue_t *q, unsigned short size) | ||
643 | { | ||
644 | q->hardsect_size = size; | ||
645 | } | ||
646 | |||
647 | EXPORT_SYMBOL(blk_queue_hardsect_size); | ||
648 | |||
649 | /* | ||
650 | * Returns the minimum that is _not_ zero, unless both are zero. | ||
651 | */ | ||
652 | #define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min((l), (r)))) | ||
653 | |||
654 | /** | ||
655 | * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers | ||
656 | * @t: the stacking driver (top) | ||
657 | * @b: the underlying device (bottom) | ||
658 | **/ | ||
659 | void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b) | ||
660 | { | ||
661 | /* zero is "infinity" */ | ||
662 | t->max_sectors = t->max_hw_sectors = | ||
663 | min_not_zero(t->max_sectors,b->max_sectors); | ||
664 | |||
665 | t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments); | ||
666 | t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); | ||
667 | t->max_segment_size = min(t->max_segment_size,b->max_segment_size); | ||
668 | t->hardsect_size = max(t->hardsect_size,b->hardsect_size); | ||
669 | } | ||
670 | |||
671 | EXPORT_SYMBOL(blk_queue_stack_limits); | ||
672 | |||
673 | /** | ||
674 | * blk_queue_segment_boundary - set boundary rules for segment merging | ||
675 | * @q: the request queue for the device | ||
676 | * @mask: the memory boundary mask | ||
677 | **/ | ||
678 | void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask) | ||
679 | { | ||
680 | if (mask < PAGE_CACHE_SIZE - 1) { | ||
681 | mask = PAGE_CACHE_SIZE - 1; | ||
682 | printk("%s: set to minimum %lx\n", __FUNCTION__, mask); | ||
683 | } | ||
684 | |||
685 | q->seg_boundary_mask = mask; | ||
686 | } | ||
687 | |||
688 | EXPORT_SYMBOL(blk_queue_segment_boundary); | ||
689 | |||
690 | /** | ||
691 | * blk_queue_dma_alignment - set dma length and memory alignment | ||
692 | * @q: the request queue for the device | ||
693 | * @mask: alignment mask | ||
694 | * | ||
695 | * description: | ||
696 | * set required memory and length alignment for direct dma transactions. | ||
697 | * this is used when building direct io requests for the queue. | ||
698 | * | ||
699 | **/ | ||
700 | void blk_queue_dma_alignment(request_queue_t *q, int mask) | ||
701 | { | ||
702 | q->dma_alignment = mask; | ||
703 | } | ||
704 | |||
705 | EXPORT_SYMBOL(blk_queue_dma_alignment); | ||
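/*
 * Illustrative sketch, values made up: keep segments from crossing 64KB
 * boundaries and require 4-byte aligned buffers for direct I/O (the mask
 * is "length - 1" in both cases).
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 *	blk_queue_dma_alignment(q, 3);
 */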
706 | |||
707 | /** | ||
708 | * blk_queue_find_tag - find a request by its tag and queue | ||
709 | * @q: The request queue for the device | ||
710 | * @tag: The tag of the request | ||
711 | * | ||
712 | * Notes: | ||
713 | * Should be used when a device returns a tag and you want to match | ||
714 | * it with a request. | ||
715 | * | ||
716 | * no locks need be held. | ||
717 | **/ | ||
718 | struct request *blk_queue_find_tag(request_queue_t *q, int tag) | ||
719 | { | ||
720 | struct blk_queue_tag *bqt = q->queue_tags; | ||
721 | |||
722 | if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) | ||
723 | return NULL; | ||
724 | |||
725 | return bqt->tag_index[tag]; | ||
726 | } | ||
727 | |||
728 | EXPORT_SYMBOL(blk_queue_find_tag); | ||
729 | |||
730 | /** | ||
731 | * __blk_queue_free_tags - release tag maintenance info | ||
732 | * @q: the request queue for the device | ||
733 | * | ||
734 | * Notes: | ||
735 | * blk_cleanup_queue() will take care of calling this function, if tagging | ||
736 | * has been used. So there's no need to call this directly. | ||
737 | **/ | ||
738 | static void __blk_queue_free_tags(request_queue_t *q) | ||
739 | { | ||
740 | struct blk_queue_tag *bqt = q->queue_tags; | ||
741 | |||
742 | if (!bqt) | ||
743 | return; | ||
744 | |||
745 | if (atomic_dec_and_test(&bqt->refcnt)) { | ||
746 | BUG_ON(bqt->busy); | ||
747 | BUG_ON(!list_empty(&bqt->busy_list)); | ||
748 | |||
749 | kfree(bqt->tag_index); | ||
750 | bqt->tag_index = NULL; | ||
751 | |||
752 | kfree(bqt->tag_map); | ||
753 | bqt->tag_map = NULL; | ||
754 | |||
755 | kfree(bqt); | ||
756 | } | ||
757 | |||
758 | q->queue_tags = NULL; | ||
759 | q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED); | ||
760 | } | ||
761 | |||
762 | /** | ||
763 | * blk_queue_free_tags - release tag maintenance info | ||
764 | * @q: the request queue for the device | ||
765 | * | ||
766 | * Notes: | ||
767 | * This is used to disable tagged queuing to a device, yet leave the | ||
768 | * queue in function. | ||
769 | **/ | ||
770 | void blk_queue_free_tags(request_queue_t *q) | ||
771 | { | ||
772 | clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); | ||
773 | } | ||
774 | |||
775 | EXPORT_SYMBOL(blk_queue_free_tags); | ||
776 | |||
777 | static int | ||
778 | init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) | ||
779 | { | ||
780 | struct request **tag_index; | ||
781 | unsigned long *tag_map; | ||
782 | int nr_ulongs; | ||
783 | |||
784 | if (depth > q->nr_requests * 2) { | ||
785 | depth = q->nr_requests * 2; | ||
786 | printk(KERN_ERR "%s: adjusted depth to %d\n", | ||
787 | __FUNCTION__, depth); | ||
788 | } | ||
789 | |||
790 | tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC); | ||
791 | if (!tag_index) | ||
792 | goto fail; | ||
793 | |||
794 | nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; | ||
795 | tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); | ||
796 | if (!tag_map) | ||
797 | goto fail; | ||
798 | |||
799 | memset(tag_index, 0, depth * sizeof(struct request *)); | ||
800 | memset(tag_map, 0, nr_ulongs * sizeof(unsigned long)); | ||
801 | tags->real_max_depth = depth; | ||
802 | tags->max_depth = depth; | ||
803 | tags->tag_index = tag_index; | ||
804 | tags->tag_map = tag_map; | ||
805 | |||
806 | return 0; | ||
807 | fail: | ||
808 | kfree(tag_index); | ||
809 | return -ENOMEM; | ||
810 | } | ||
811 | |||
812 | /** | ||
813 | * blk_queue_init_tags - initialize the queue tag info | ||
814 | * @q: the request queue for the device | ||
815 | * @depth: the maximum queue depth supported | ||
816 | * @tags: the tag to use | ||
817 | **/ | ||
818 | int blk_queue_init_tags(request_queue_t *q, int depth, | ||
819 | struct blk_queue_tag *tags) | ||
820 | { | ||
821 | int rc; | ||
822 | |||
823 | BUG_ON(tags && q->queue_tags && tags != q->queue_tags); | ||
824 | |||
825 | if (!tags && !q->queue_tags) { | ||
826 | tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); | ||
827 | if (!tags) | ||
828 | goto fail; | ||
829 | |||
830 | if (init_tag_map(q, tags, depth)) | ||
831 | goto fail; | ||
832 | |||
833 | INIT_LIST_HEAD(&tags->busy_list); | ||
834 | tags->busy = 0; | ||
835 | atomic_set(&tags->refcnt, 1); | ||
836 | } else if (q->queue_tags) { | ||
837 | if ((rc = blk_queue_resize_tags(q, depth))) | ||
838 | return rc; | ||
839 | set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); | ||
840 | return 0; | ||
841 | } else | ||
842 | atomic_inc(&tags->refcnt); | ||
843 | |||
844 | /* | ||
845 | * assign it, all done | ||
846 | */ | ||
847 | q->queue_tags = tags; | ||
848 | q->queue_flags |= (1 << QUEUE_FLAG_QUEUED); | ||
849 | return 0; | ||
850 | fail: | ||
851 | kfree(tags); | ||
852 | return -ENOMEM; | ||
853 | } | ||
854 | |||
855 | EXPORT_SYMBOL(blk_queue_init_tags); | ||
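/*
 * Illustrative sketch, assumed depth and message: enabling tagged
 * queuing with a 64 command depth and a freshly allocated tag map
 * (passing NULL for @tags).
 *
 *	if (blk_queue_init_tags(q, 64, NULL))
 *		printk(KERN_WARNING "mydrv: tagged queuing not enabled\n");
 */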
856 | |||
857 | /** | ||
858 | * blk_queue_resize_tags - change the queueing depth | ||
859 | * @q: the request queue for the device | ||
860 | * @new_depth: the new max command queueing depth | ||
861 | * | ||
862 | * Notes: | ||
863 | * Must be called with the queue lock held. | ||
864 | **/ | ||
865 | int blk_queue_resize_tags(request_queue_t *q, int new_depth) | ||
866 | { | ||
867 | struct blk_queue_tag *bqt = q->queue_tags; | ||
868 | struct request **tag_index; | ||
869 | unsigned long *tag_map; | ||
870 | int max_depth, nr_ulongs; | ||
871 | |||
872 | if (!bqt) | ||
873 | return -ENXIO; | ||
874 | |||
875 | /* | ||
876 | * if we already have a large enough real_max_depth, just | ||
877 | * adjust max_depth. *NOTE* as requests with tag value | ||
878 | * between new_depth and real_max_depth can be in-flight, tag | ||
879 | * map can not be shrunk blindly here. | ||
880 | */ | ||
881 | if (new_depth <= bqt->real_max_depth) { | ||
882 | bqt->max_depth = new_depth; | ||
883 | return 0; | ||
884 | } | ||
885 | |||
886 | /* | ||
887 | * save the old state info, so we can copy it back | ||
888 | */ | ||
889 | tag_index = bqt->tag_index; | ||
890 | tag_map = bqt->tag_map; | ||
891 | max_depth = bqt->real_max_depth; | ||
892 | |||
893 | if (init_tag_map(q, bqt, new_depth)) | ||
894 | return -ENOMEM; | ||
895 | |||
896 | memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *)); | ||
897 | nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG; | ||
898 | memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long)); | ||
899 | |||
900 | kfree(tag_index); | ||
901 | kfree(tag_map); | ||
902 | return 0; | ||
903 | } | ||
904 | |||
905 | EXPORT_SYMBOL(blk_queue_resize_tags); | ||
906 | |||
907 | /** | ||
908 | * blk_queue_end_tag - end tag operations for a request | ||
909 | * @q: the request queue for the device | ||
910 | * @rq: the request that has completed | ||
911 | * | ||
912 | * Description: | ||
913 | * Typically called when end_that_request_first() returns 0, meaning | ||
914 | * all transfers have been done for a request. It's important to call | ||
915 | * this function before end_that_request_last(), as that will put the | ||
916 | * request back on the free list thus corrupting the internal tag list. | ||
917 | * | ||
918 | * Notes: | ||
919 | * queue lock must be held. | ||
920 | **/ | ||
921 | void blk_queue_end_tag(request_queue_t *q, struct request *rq) | ||
922 | { | ||
923 | struct blk_queue_tag *bqt = q->queue_tags; | ||
924 | int tag = rq->tag; | ||
925 | |||
926 | BUG_ON(tag == -1); | ||
927 | |||
928 | if (unlikely(tag >= bqt->real_max_depth)) | ||
929 | /* | ||
930 | * This can happen after tag depth has been reduced. | ||
931 | * FIXME: how about a warning or info message here? | ||
932 | */ | ||
933 | return; | ||
934 | |||
935 | if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) { | ||
936 | printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", | ||
937 | __FUNCTION__, tag); | ||
938 | return; | ||
939 | } | ||
940 | |||
941 | list_del_init(&rq->queuelist); | ||
942 | rq->flags &= ~REQ_QUEUED; | ||
943 | rq->tag = -1; | ||
944 | |||
945 | if (unlikely(bqt->tag_index[tag] == NULL)) | ||
946 | printk(KERN_ERR "%s: tag %d is missing\n", | ||
947 | __FUNCTION__, tag); | ||
948 | |||
949 | bqt->tag_index[tag] = NULL; | ||
950 | bqt->busy--; | ||
951 | } | ||
952 | |||
953 | EXPORT_SYMBOL(blk_queue_end_tag); | ||
954 | |||
955 | /** | ||
956 | * blk_queue_start_tag - find a free tag and assign it | ||
957 | * @q: the request queue for the device | ||
958 | * @rq: the block request that needs tagging | ||
959 | * | ||
960 | * Description: | ||
961 | * This can either be used as a stand-alone helper, or possibly be | ||
962 | * assigned as the queue &prep_rq_fn (in which case &struct request | ||
963 | * automagically gets a tag assigned). Note that this function | ||
964 | * assumes that any type of request can be queued! if this is not | ||
965 | * true for your device, you must check the request type before | ||
966 | * calling this function. The request will also be removed from | ||
967 | * the request queue, so it's the driver's responsibility to re-add | ||
968 | * it if it should need to be restarted for some reason. | ||
969 | * | ||
970 | * Notes: | ||
971 | * queue lock must be held. | ||
972 | **/ | ||
973 | int blk_queue_start_tag(request_queue_t *q, struct request *rq) | ||
974 | { | ||
975 | struct blk_queue_tag *bqt = q->queue_tags; | ||
976 | int tag; | ||
977 | |||
978 | if (unlikely((rq->flags & REQ_QUEUED))) { | ||
979 | printk(KERN_ERR | ||
980 | "%s: request %p for device [%s] already tagged %d", | ||
981 | __FUNCTION__, rq, | ||
982 | rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); | ||
983 | BUG(); | ||
984 | } | ||
985 | |||
986 | tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth); | ||
987 | if (tag >= bqt->max_depth) | ||
988 | return 1; | ||
989 | |||
990 | __set_bit(tag, bqt->tag_map); | ||
991 | |||
992 | rq->flags |= REQ_QUEUED; | ||
993 | rq->tag = tag; | ||
994 | bqt->tag_index[tag] = rq; | ||
995 | blkdev_dequeue_request(rq); | ||
996 | list_add(&rq->queuelist, &bqt->busy_list); | ||
997 | bqt->busy++; | ||
998 | return 0; | ||
999 | } | ||
1000 | |||
1001 | EXPORT_SYMBOL(blk_queue_start_tag); | ||
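/*
 * Illustrative sketch of the tag lifecycle in a driver; mydrv_issue is a
 * stand-in for the hardware submit path and both paths run with the
 * queue lock held. The tag is assigned at dispatch time and released
 * from the completion handler before end_that_request_last().
 *
 *	if (blk_queue_start_tag(q, rq))
 *		return;			no free tag, retry later
 *	mydrv_issue(rq->tag, rq);
 *
 *	... later, in the completion path ...
 *	blk_queue_end_tag(q, rq);
 *	end_that_request_last(rq);
 */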
1002 | |||
1003 | /** | ||
1004 | * blk_queue_invalidate_tags - invalidate all pending tags | ||
1005 | * @q: the request queue for the device | ||
1006 | * | ||
1007 | * Description: | ||
1008 | * Hardware conditions may dictate a need to stop all pending requests. | ||
1009 | * In this case, we will safely clear the block side of the tag queue and | ||
1010 | * re-add all requests to the request queue in the right order. | ||
1011 | * | ||
1012 | * Notes: | ||
1013 | * queue lock must be held. | ||
1014 | **/ | ||
1015 | void blk_queue_invalidate_tags(request_queue_t *q) | ||
1016 | { | ||
1017 | struct blk_queue_tag *bqt = q->queue_tags; | ||
1018 | struct list_head *tmp, *n; | ||
1019 | struct request *rq; | ||
1020 | |||
1021 | list_for_each_safe(tmp, n, &bqt->busy_list) { | ||
1022 | rq = list_entry_rq(tmp); | ||
1023 | |||
1024 | if (rq->tag == -1) { | ||
1025 | printk(KERN_ERR | ||
1026 | "%s: bad tag found on list\n", __FUNCTION__); | ||
1027 | list_del_init(&rq->queuelist); | ||
1028 | rq->flags &= ~REQ_QUEUED; | ||
1029 | } else | ||
1030 | blk_queue_end_tag(q, rq); | ||
1031 | |||
1032 | rq->flags &= ~REQ_STARTED; | ||
1033 | __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); | ||
1034 | } | ||
1035 | } | ||
1036 | |||
1037 | EXPORT_SYMBOL(blk_queue_invalidate_tags); | ||
1038 | |||
1039 | static char *rq_flags[] = { | ||
1040 | "REQ_RW", | ||
1041 | "REQ_FAILFAST", | ||
1042 | "REQ_SORTED", | ||
1043 | "REQ_SOFTBARRIER", | ||
1044 | "REQ_HARDBARRIER", | ||
1045 | "REQ_CMD", | ||
1046 | "REQ_NOMERGE", | ||
1047 | "REQ_STARTED", | ||
1048 | "REQ_DONTPREP", | ||
1049 | "REQ_QUEUED", | ||
1050 | "REQ_ELVPRIV", | ||
1051 | "REQ_PC", | ||
1052 | "REQ_BLOCK_PC", | ||
1053 | "REQ_SENSE", | ||
1054 | "REQ_FAILED", | ||
1055 | "REQ_QUIET", | ||
1056 | "REQ_SPECIAL", | ||
1057 | "REQ_DRIVE_CMD", | ||
1058 | "REQ_DRIVE_TASK", | ||
1059 | "REQ_DRIVE_TASKFILE", | ||
1060 | "REQ_PREEMPT", | ||
1061 | "REQ_PM_SUSPEND", | ||
1062 | "REQ_PM_RESUME", | ||
1063 | "REQ_PM_SHUTDOWN", | ||
1064 | }; | ||
1065 | |||
1066 | void blk_dump_rq_flags(struct request *rq, char *msg) | ||
1067 | { | ||
1068 | int bit; | ||
1069 | |||
1070 | printk("%s: dev %s: flags = ", msg, | ||
1071 | rq->rq_disk ? rq->rq_disk->disk_name : "?"); | ||
1072 | bit = 0; | ||
1073 | do { | ||
1074 | if (rq->flags & (1 << bit)) | ||
1075 | printk("%s ", rq_flags[bit]); | ||
1076 | bit++; | ||
1077 | } while (bit < __REQ_NR_BITS); | ||
1078 | |||
1079 | printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector, | ||
1080 | rq->nr_sectors, | ||
1081 | rq->current_nr_sectors); | ||
1082 | printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len); | ||
1083 | |||
1084 | if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) { | ||
1085 | printk("cdb: "); | ||
1086 | for (bit = 0; bit < sizeof(rq->cmd); bit++) | ||
1087 | printk("%02x ", rq->cmd[bit]); | ||
1088 | printk("\n"); | ||
1089 | } | ||
1090 | } | ||
1091 | |||
1092 | EXPORT_SYMBOL(blk_dump_rq_flags); | ||
1093 | |||
1094 | void blk_recount_segments(request_queue_t *q, struct bio *bio) | ||
1095 | { | ||
1096 | struct bio_vec *bv, *bvprv = NULL; | ||
1097 | int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster; | ||
1098 | int high, highprv = 1; | ||
1099 | |||
1100 | if (unlikely(!bio->bi_io_vec)) | ||
1101 | return; | ||
1102 | |||
1103 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); | ||
1104 | hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0; | ||
1105 | bio_for_each_segment(bv, bio, i) { | ||
1106 | /* | ||
1107 | * the trick here is making sure that a high page is never | ||
1108 | * considered part of another segment, since that might | ||
1109 | * change with the bounce page. | ||
1110 | */ | ||
1111 | high = page_to_pfn(bv->bv_page) >= q->bounce_pfn; | ||
1112 | if (high || highprv) | ||
1113 | goto new_hw_segment; | ||
1114 | if (cluster) { | ||
1115 | if (seg_size + bv->bv_len > q->max_segment_size) | ||
1116 | goto new_segment; | ||
1117 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) | ||
1118 | goto new_segment; | ||
1119 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) | ||
1120 | goto new_segment; | ||
1121 | if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) | ||
1122 | goto new_hw_segment; | ||
1123 | |||
1124 | seg_size += bv->bv_len; | ||
1125 | hw_seg_size += bv->bv_len; | ||
1126 | bvprv = bv; | ||
1127 | continue; | ||
1128 | } | ||
1129 | new_segment: | ||
1130 | if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) && | ||
1131 | !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) { | ||
1132 | hw_seg_size += bv->bv_len; | ||
1133 | } else { | ||
1134 | new_hw_segment: | ||
1135 | if (hw_seg_size > bio->bi_hw_front_size) | ||
1136 | bio->bi_hw_front_size = hw_seg_size; | ||
1137 | hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len; | ||
1138 | nr_hw_segs++; | ||
1139 | } | ||
1140 | |||
1141 | nr_phys_segs++; | ||
1142 | bvprv = bv; | ||
1143 | seg_size = bv->bv_len; | ||
1144 | highprv = high; | ||
1145 | } | ||
1146 | if (hw_seg_size > bio->bi_hw_back_size) | ||
1147 | bio->bi_hw_back_size = hw_seg_size; | ||
1148 | if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size) | ||
1149 | bio->bi_hw_front_size = hw_seg_size; | ||
1150 | bio->bi_phys_segments = nr_phys_segs; | ||
1151 | bio->bi_hw_segments = nr_hw_segs; | ||
1152 | bio->bi_flags |= (1 << BIO_SEG_VALID); | ||
1153 | } | ||
1154 | |||
1155 | |||
1156 | static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, | ||
1157 | struct bio *nxt) | ||
1158 | { | ||
1159 | if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) | ||
1160 | return 0; | ||
1161 | |||
1162 | if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt))) | ||
1163 | return 0; | ||
1164 | if (bio->bi_size + nxt->bi_size > q->max_segment_size) | ||
1165 | return 0; | ||
1166 | |||
1167 | /* | ||
1168 | * bio and nxt are contiguous in memory, check if the queue allows | ||
1169 | * these two to be merged into one | ||
1170 | */ | ||
1171 | if (BIO_SEG_BOUNDARY(q, bio, nxt)) | ||
1172 | return 1; | ||
1173 | |||
1174 | return 0; | ||
1175 | } | ||
1176 | |||
1177 | static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, | ||
1178 | struct bio *nxt) | ||
1179 | { | ||
1180 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | ||
1181 | blk_recount_segments(q, bio); | ||
1182 | if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID))) | ||
1183 | blk_recount_segments(q, nxt); | ||
1184 | if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) || | ||
1185 | BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size)) | ||
1186 | return 0; | ||
1187 | if (bio->bi_size + nxt->bi_size > q->max_segment_size) | ||
1188 | return 0; | ||
1189 | |||
1190 | return 1; | ||
1191 | } | ||
1192 | |||
1193 | /* | ||
1194 | * map a request to scatterlist, return number of sg entries setup. Caller | ||
1195 | * must make sure sg can hold rq->nr_phys_segments entries | ||
1196 | */ | ||
1197 | int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg) | ||
1198 | { | ||
1199 | struct bio_vec *bvec, *bvprv; | ||
1200 | struct bio *bio; | ||
1201 | int nsegs, i, cluster; | ||
1202 | |||
1203 | nsegs = 0; | ||
1204 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); | ||
1205 | |||
1206 | /* | ||
1207 | * for each bio in rq | ||
1208 | */ | ||
1209 | bvprv = NULL; | ||
1210 | rq_for_each_bio(bio, rq) { | ||
1211 | /* | ||
1212 | * for each segment in bio | ||
1213 | */ | ||
1214 | bio_for_each_segment(bvec, bio, i) { | ||
1215 | int nbytes = bvec->bv_len; | ||
1216 | |||
1217 | if (bvprv && cluster) { | ||
1218 | if (sg[nsegs - 1].length + nbytes > q->max_segment_size) | ||
1219 | goto new_segment; | ||
1220 | |||
1221 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) | ||
1222 | goto new_segment; | ||
1223 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) | ||
1224 | goto new_segment; | ||
1225 | |||
1226 | sg[nsegs - 1].length += nbytes; | ||
1227 | } else { | ||
1228 | new_segment: | ||
1229 | memset(&sg[nsegs],0,sizeof(struct scatterlist)); | ||
1230 | sg[nsegs].page = bvec->bv_page; | ||
1231 | sg[nsegs].length = nbytes; | ||
1232 | sg[nsegs].offset = bvec->bv_offset; | ||
1233 | |||
1234 | nsegs++; | ||
1235 | } | ||
1236 | bvprv = bvec; | ||
1237 | } /* segments in bio */ | ||
1238 | } /* bios in rq */ | ||
1239 | |||
1240 | return nsegs; | ||
1241 | } | ||
1242 | |||
1243 | EXPORT_SYMBOL(blk_rq_map_sg); | ||
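/*
 * Illustrative sketch, assumed names (sglist, MYDRV_MAX_SEGMENTS, dev):
 * building and mapping a scatterlist for a request before programming
 * the DMA engine. sglist must hold rq->nr_phys_segments entries, as
 * noted above.
 *
 *	struct scatterlist sglist[MYDRV_MAX_SEGMENTS];
 *	int count;
 *
 *	count = blk_rq_map_sg(q, rq, sglist);
 *	count = dma_map_sg(dev, sglist, count,
 *			   rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE
 *						    : DMA_FROM_DEVICE);
 */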
1244 | |||
1245 | /* | ||
1246 | * the standard queue merge functions, can be overridden with device | ||
1247 | * specific ones if so desired | ||
1248 | */ | ||
1249 | |||
1250 | static inline int ll_new_mergeable(request_queue_t *q, | ||
1251 | struct request *req, | ||
1252 | struct bio *bio) | ||
1253 | { | ||
1254 | int nr_phys_segs = bio_phys_segments(q, bio); | ||
1255 | |||
1256 | if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { | ||
1257 | req->flags |= REQ_NOMERGE; | ||
1258 | if (req == q->last_merge) | ||
1259 | q->last_merge = NULL; | ||
1260 | return 0; | ||
1261 | } | ||
1262 | |||
1263 | /* | ||
1264 | * A hw segment is just getting larger, bump just the phys | ||
1265 | * counter. | ||
1266 | */ | ||
1267 | req->nr_phys_segments += nr_phys_segs; | ||
1268 | return 1; | ||
1269 | } | ||
1270 | |||
1271 | static inline int ll_new_hw_segment(request_queue_t *q, | ||
1272 | struct request *req, | ||
1273 | struct bio *bio) | ||
1274 | { | ||
1275 | int nr_hw_segs = bio_hw_segments(q, bio); | ||
1276 | int nr_phys_segs = bio_phys_segments(q, bio); | ||
1277 | |||
1278 | if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments | ||
1279 | || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { | ||
1280 | req->flags |= REQ_NOMERGE; | ||
1281 | if (req == q->last_merge) | ||
1282 | q->last_merge = NULL; | ||
1283 | return 0; | ||
1284 | } | ||
1285 | |||
1286 | /* | ||
1287 | * This will form the start of a new hw segment. Bump both | ||
1288 | * counters. | ||
1289 | */ | ||
1290 | req->nr_hw_segments += nr_hw_segs; | ||
1291 | req->nr_phys_segments += nr_phys_segs; | ||
1292 | return 1; | ||
1293 | } | ||
1294 | |||
1295 | static int ll_back_merge_fn(request_queue_t *q, struct request *req, | ||
1296 | struct bio *bio) | ||
1297 | { | ||
1298 | int len; | ||
1299 | |||
1300 | if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) { | ||
1301 | req->flags |= REQ_NOMERGE; | ||
1302 | if (req == q->last_merge) | ||
1303 | q->last_merge = NULL; | ||
1304 | return 0; | ||
1305 | } | ||
1306 | if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID))) | ||
1307 | blk_recount_segments(q, req->biotail); | ||
1308 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | ||
1309 | blk_recount_segments(q, bio); | ||
1310 | len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size; | ||
1311 | if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) && | ||
1312 | !BIOVEC_VIRT_OVERSIZE(len)) { | ||
1313 | int mergeable = ll_new_mergeable(q, req, bio); | ||
1314 | |||
1315 | if (mergeable) { | ||
1316 | if (req->nr_hw_segments == 1) | ||
1317 | req->bio->bi_hw_front_size = len; | ||
1318 | if (bio->bi_hw_segments == 1) | ||
1319 | bio->bi_hw_back_size = len; | ||
1320 | } | ||
1321 | return mergeable; | ||
1322 | } | ||
1323 | |||
1324 | return ll_new_hw_segment(q, req, bio); | ||
1325 | } | ||
1326 | |||
1327 | static int ll_front_merge_fn(request_queue_t *q, struct request *req, | ||
1328 | struct bio *bio) | ||
1329 | { | ||
1330 | int len; | ||
1331 | |||
1332 | if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) { | ||
1333 | req->flags |= REQ_NOMERGE; | ||
1334 | if (req == q->last_merge) | ||
1335 | q->last_merge = NULL; | ||
1336 | return 0; | ||
1337 | } | ||
1338 | len = bio->bi_hw_back_size + req->bio->bi_hw_front_size; | ||
1339 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | ||
1340 | blk_recount_segments(q, bio); | ||
1341 | if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID))) | ||
1342 | blk_recount_segments(q, req->bio); | ||
1343 | if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) && | ||
1344 | !BIOVEC_VIRT_OVERSIZE(len)) { | ||
1345 | int mergeable = ll_new_mergeable(q, req, bio); | ||
1346 | |||
1347 | if (mergeable) { | ||
1348 | if (bio->bi_hw_segments == 1) | ||
1349 | bio->bi_hw_front_size = len; | ||
1350 | if (req->nr_hw_segments == 1) | ||
1351 | req->biotail->bi_hw_back_size = len; | ||
1352 | } | ||
1353 | return mergeable; | ||
1354 | } | ||
1355 | |||
1356 | return ll_new_hw_segment(q, req, bio); | ||
1357 | } | ||
1358 | |||
1359 | static int ll_merge_requests_fn(request_queue_t *q, struct request *req, | ||
1360 | struct request *next) | ||
1361 | { | ||
1362 | int total_phys_segments; | ||
1363 | int total_hw_segments; | ||
1364 | |||
1365 | /* | ||
1366 | * First check if either of the requests is a re-queued | ||
1367 | * request. Can't merge them if they are. | ||
1368 | */ | ||
1369 | if (req->special || next->special) | ||
1370 | return 0; | ||
1371 | |||
1372 | /* | ||
1373 | * Will it become too large? | ||
1374 | */ | ||
1375 | if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) | ||
1376 | return 0; | ||
1377 | |||
1378 | total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; | ||
1379 | if (blk_phys_contig_segment(q, req->biotail, next->bio)) | ||
1380 | total_phys_segments--; | ||
1381 | |||
1382 | if (total_phys_segments > q->max_phys_segments) | ||
1383 | return 0; | ||
1384 | |||
1385 | total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; | ||
1386 | if (blk_hw_contig_segment(q, req->biotail, next->bio)) { | ||
1387 | int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size; | ||
1388 | /* | ||
1389 | * propagate the combined length to the end of the requests | ||
1390 | */ | ||
1391 | if (req->nr_hw_segments == 1) | ||
1392 | req->bio->bi_hw_front_size = len; | ||
1393 | if (next->nr_hw_segments == 1) | ||
1394 | next->biotail->bi_hw_back_size = len; | ||
1395 | total_hw_segments--; | ||
1396 | } | ||
1397 | |||
1398 | if (total_hw_segments > q->max_hw_segments) | ||
1399 | return 0; | ||
1400 | |||
1401 | /* Merge is OK... */ | ||
1402 | req->nr_phys_segments = total_phys_segments; | ||
1403 | req->nr_hw_segments = total_hw_segments; | ||
1404 | return 1; | ||
1405 | } | ||
1406 | |||
1407 | /* | ||
1408 | * "plug" the device if there are no outstanding requests: this will | ||
1409 | * force the transfer to start only after we have put all the requests | ||
1410 | * on the list. | ||
1411 | * | ||
1412 | * This is called with interrupts off and no requests on the queue and | ||
1413 | * with the queue lock held. | ||
1414 | */ | ||
1415 | void blk_plug_device(request_queue_t *q) | ||
1416 | { | ||
1417 | WARN_ON(!irqs_disabled()); | ||
1418 | |||
1419 | /* | ||
1420 | * don't plug a stopped queue, it must be paired with blk_start_queue() | ||
1421 | * which will restart the queueing | ||
1422 | */ | ||
1423 | if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) | ||
1424 | return; | ||
1425 | |||
1426 | if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) | ||
1427 | mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); | ||
1428 | } | ||
1429 | |||
1430 | EXPORT_SYMBOL(blk_plug_device); | ||
1431 | |||
1432 | /* | ||
1433 | * remove the queue from the plugged list, if present. called with | ||
1434 | * queue lock held and interrupts disabled. | ||
1435 | */ | ||
1436 | int blk_remove_plug(request_queue_t *q) | ||
1437 | { | ||
1438 | WARN_ON(!irqs_disabled()); | ||
1439 | |||
1440 | if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) | ||
1441 | return 0; | ||
1442 | |||
1443 | del_timer(&q->unplug_timer); | ||
1444 | return 1; | ||
1445 | } | ||
1446 | |||
1447 | EXPORT_SYMBOL(blk_remove_plug); | ||
1448 | |||
1449 | /* | ||
1450 | * remove the plug and let it rip.. | ||
1451 | */ | ||
1452 | void __generic_unplug_device(request_queue_t *q) | ||
1453 | { | ||
1454 | if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))) | ||
1455 | return; | ||
1456 | |||
1457 | if (!blk_remove_plug(q)) | ||
1458 | return; | ||
1459 | |||
1460 | q->request_fn(q); | ||
1461 | } | ||
1462 | EXPORT_SYMBOL(__generic_unplug_device); | ||
1463 | |||
1464 | /** | ||
1465 | * generic_unplug_device - fire a request queue | ||
1466 | * @q: The &request_queue_t in question | ||
1467 | * | ||
1468 | * Description: | ||
1469 | * Linux uses plugging to build bigger request queues before letting | ||
1470 | * the device have at them. If a queue is plugged, the I/O scheduler | ||
1471 | * is still adding and merging requests on the queue. Once the queue | ||
1472 | * gets unplugged, the request_fn defined for the queue is invoked and | ||
1473 | * transfers started. | ||
1474 | **/ | ||
1475 | void generic_unplug_device(request_queue_t *q) | ||
1476 | { | ||
1477 | spin_lock_irq(q->queue_lock); | ||
1478 | __generic_unplug_device(q); | ||
1479 | spin_unlock_irq(q->queue_lock); | ||
1480 | } | ||
1481 | EXPORT_SYMBOL(generic_unplug_device); | ||
1482 | |||
1483 | static void blk_backing_dev_unplug(struct backing_dev_info *bdi, | ||
1484 | struct page *page) | ||
1485 | { | ||
1486 | request_queue_t *q = bdi->unplug_io_data; | ||
1487 | |||
1488 | /* | ||
1489 | * devices don't necessarily have an ->unplug_fn defined | ||
1490 | */ | ||
1491 | if (q->unplug_fn) | ||
1492 | q->unplug_fn(q); | ||
1493 | } | ||
1494 | |||
1495 | static void blk_unplug_work(void *data) | ||
1496 | { | ||
1497 | request_queue_t *q = data; | ||
1498 | |||
1499 | q->unplug_fn(q); | ||
1500 | } | ||
1501 | |||
1502 | static void blk_unplug_timeout(unsigned long data) | ||
1503 | { | ||
1504 | request_queue_t *q = (request_queue_t *)data; | ||
1505 | |||
1506 | kblockd_schedule_work(&q->unplug_work); | ||
1507 | } | ||
1508 | |||
1509 | /** | ||
1510 | * blk_start_queue - restart a previously stopped queue | ||
1511 | * @q: The &request_queue_t in question | ||
1512 | * | ||
1513 | * Description: | ||
1514 | * blk_start_queue() will clear the stop flag on the queue, and call | ||
1515 | * the request_fn for the queue if it was in a stopped state when | ||
1516 | * entered. Also see blk_stop_queue(). Queue lock must be held. | ||
1517 | **/ | ||
1518 | void blk_start_queue(request_queue_t *q) | ||
1519 | { | ||
1520 | clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); | ||
1521 | |||
1522 | /* | ||
1523 | * one level of recursion is ok and is much faster than kicking | ||
1524 | * the unplug handling | ||
1525 | */ | ||
1526 | if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { | ||
1527 | q->request_fn(q); | ||
1528 | clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags); | ||
1529 | } else { | ||
1530 | blk_plug_device(q); | ||
1531 | kblockd_schedule_work(&q->unplug_work); | ||
1532 | } | ||
1533 | } | ||
1534 | |||
1535 | EXPORT_SYMBOL(blk_start_queue); | ||
1536 | |||
1537 | /** | ||
1538 | * blk_stop_queue - stop a queue | ||
1539 | * @q: The &request_queue_t in question | ||
1540 | * | ||
1541 | * Description: | ||
1542 | * The Linux block layer assumes that a block driver will consume all | ||
1543 | * entries on the request queue when the request_fn strategy is called. | ||
1544 | * Often this will not happen, because of hardware limitations (queue | ||
1545 | * depth settings). If a device driver gets a 'queue full' response, | ||
1546 | * or if it simply chooses not to queue more I/O at one point, it can | ||
1547 | * call this function to prevent the request_fn from being called until | ||
1548 | * the driver has signalled it's ready to go again. This happens by calling | ||
1549 | * blk_start_queue() to restart queue operations. Queue lock must be held. | ||
1550 | **/ | ||
1551 | void blk_stop_queue(request_queue_t *q) | ||
1552 | { | ||
1553 | blk_remove_plug(q); | ||
1554 | set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); | ||
1555 | } | ||
1556 | EXPORT_SYMBOL(blk_stop_queue); | ||
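/*
 * A minimal usage sketch of the 'queue full' pattern blk_stop_queue() and
 * blk_start_queue() are meant for.  The mydev_* names are hypothetical and
 * real command submission is device specific, so it is omitted here.
 */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static int mydev_hw_queue_full(void)
{
	return 0;	/* stub standing in for a real hardware queue-depth check */
}

static void mydev_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (mydev_hw_queue_full()) {
			/* leave rq on the queue and stop request_fn callbacks */
			blk_stop_queue(q);	/* queue lock is already held here */
			break;
		}
		/* hand rq to the hardware here; in this sketch just complete it */
		end_request(rq, 1);
	}
}

/* completion/interrupt path: hardware has drained, let requests flow again */
static void mydev_unthrottle(request_queue_t *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);	/* must be called with the queue lock held */
	spin_unlock_irqrestore(q->queue_lock, flags);
}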
1557 | |||
1558 | /** | ||
1559 | * blk_sync_queue - cancel any pending callbacks on a queue | ||
1560 | * @q: the queue | ||
1561 | * | ||
1562 | * Description: | ||
1563 | * The block layer may perform asynchronous callback activity | ||
1564 | * on a queue, such as calling the unplug function after a timeout. | ||
1565 | * A block device may call blk_sync_queue to ensure that any | ||
1566 | * such activity is cancelled, thus allowing it to release resources | ||
1567 | * the callbacks might use. The caller must already have made sure | ||
1568 | * that its ->make_request_fn will not re-add plugging prior to calling | ||
1569 | * this function. | ||
1570 | * | ||
1571 | */ | ||
1572 | void blk_sync_queue(struct request_queue *q) | ||
1573 | { | ||
1574 | del_timer_sync(&q->unplug_timer); | ||
1575 | kblockd_flush(); | ||
1576 | } | ||
1577 | EXPORT_SYMBOL(blk_sync_queue); | ||
1578 | |||
1579 | /** | ||
1580 | * blk_run_queue - run a single device queue | ||
1581 | * @q: The queue to run | ||
1582 | */ | ||
1583 | void blk_run_queue(struct request_queue *q) | ||
1584 | { | ||
1585 | unsigned long flags; | ||
1586 | |||
1587 | spin_lock_irqsave(q->queue_lock, flags); | ||
1588 | blk_remove_plug(q); | ||
1589 | if (!elv_queue_empty(q)) | ||
1590 | q->request_fn(q); | ||
1591 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
1592 | } | ||
1593 | EXPORT_SYMBOL(blk_run_queue); | ||
1594 | |||
1595 | /** | ||
1596 | * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed | ||
1597 | * @q: the request queue to be released | ||
1598 | * | ||
1599 | * Description: | ||
1600 | * blk_cleanup_queue is the pair to blk_init_queue() or | ||
1601 | * blk_queue_make_request(). It should be called when a request queue is | ||
1602 | * being released; typically when a block device is being de-registered. | ||
1603 | * Currently, its primary task is to free all the &struct request | ||
1604 | * structures that were allocated to the queue and the queue itself. | ||
1605 | * | ||
1606 | * Caveat: | ||
1607 | * Hopefully the low level driver will have finished any | ||
1608 | * outstanding requests first... | ||
1609 | **/ | ||
1610 | void blk_cleanup_queue(request_queue_t * q) | ||
1611 | { | ||
1612 | struct request_list *rl = &q->rq; | ||
1613 | |||
1614 | if (!atomic_dec_and_test(&q->refcnt)) | ||
1615 | return; | ||
1616 | |||
1617 | if (q->elevator) | ||
1618 | elevator_exit(q->elevator); | ||
1619 | |||
1620 | blk_sync_queue(q); | ||
1621 | |||
1622 | if (rl->rq_pool) | ||
1623 | mempool_destroy(rl->rq_pool); | ||
1624 | |||
1625 | if (q->queue_tags) | ||
1626 | __blk_queue_free_tags(q); | ||
1627 | |||
1628 | blk_queue_ordered(q, QUEUE_ORDERED_NONE); | ||
1629 | |||
1630 | kmem_cache_free(requestq_cachep, q); | ||
1631 | } | ||
1632 | |||
1633 | EXPORT_SYMBOL(blk_cleanup_queue); | ||
1634 | |||
1635 | static int blk_init_free_list(request_queue_t *q) | ||
1636 | { | ||
1637 | struct request_list *rl = &q->rq; | ||
1638 | |||
1639 | rl->count[READ] = rl->count[WRITE] = 0; | ||
1640 | rl->starved[READ] = rl->starved[WRITE] = 0; | ||
1641 | rl->elvpriv = 0; | ||
1642 | init_waitqueue_head(&rl->wait[READ]); | ||
1643 | init_waitqueue_head(&rl->wait[WRITE]); | ||
1644 | |||
1645 | rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, | ||
1646 | mempool_free_slab, request_cachep, q->node); | ||
1647 | |||
1648 | if (!rl->rq_pool) | ||
1649 | return -ENOMEM; | ||
1650 | |||
1651 | return 0; | ||
1652 | } | ||
1653 | |||
1654 | static int __make_request(request_queue_t *, struct bio *); | ||
1655 | |||
1656 | request_queue_t *blk_alloc_queue(gfp_t gfp_mask) | ||
1657 | { | ||
1658 | return blk_alloc_queue_node(gfp_mask, -1); | ||
1659 | } | ||
1660 | EXPORT_SYMBOL(blk_alloc_queue); | ||
1661 | |||
1662 | request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | ||
1663 | { | ||
1664 | request_queue_t *q; | ||
1665 | |||
1666 | q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id); | ||
1667 | if (!q) | ||
1668 | return NULL; | ||
1669 | |||
1670 | memset(q, 0, sizeof(*q)); | ||
1671 | init_timer(&q->unplug_timer); | ||
1672 | atomic_set(&q->refcnt, 1); | ||
1673 | |||
1674 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; | ||
1675 | q->backing_dev_info.unplug_io_data = q; | ||
1676 | |||
1677 | return q; | ||
1678 | } | ||
1679 | EXPORT_SYMBOL(blk_alloc_queue_node); | ||
1680 | |||
1681 | /** | ||
1682 | * blk_init_queue - prepare a request queue for use with a block device | ||
1683 | * @rfn: The function to be called to process requests that have been | ||
1684 | * placed on the queue. | ||
1685 | * @lock: Request queue spin lock | ||
1686 | * | ||
1687 | * Description: | ||
1688 | * If a block device wishes to use the standard request handling procedures, | ||
1689 | * which sort requests and coalesce adjacent requests, then it must | ||
1690 | * call blk_init_queue(). The function @rfn will be called when there | ||
1691 | * are requests on the queue that need to be processed. If the device | ||
1692 | * supports plugging, then @rfn may not be called immediately when requests | ||
1693 | * are available on the queue, but may be called at some time later instead. | ||
1694 | * Plugged queues are generally unplugged when a buffer belonging to one | ||
1695 | * of the requests on the queue is needed, or due to memory pressure. | ||
1696 | * | ||
1697 | * @rfn is not required, or even expected, to remove all requests from the | ||
1698 | * queue, but only as many as it can handle at a time. If it does leave | ||
1699 | * requests on the queue, it is responsible for arranging that the requests | ||
1700 | * get dealt with eventually. | ||
1701 | * | ||
1702 | * The queue spin lock must be held while manipulating the requests on the | ||
1703 | * request queue. | ||
1704 | * | ||
1705 | * Function returns a pointer to the initialized request queue, or NULL if | ||
1706 | * it didn't succeed. | ||
1707 | * | ||
1708 | * Note: | ||
1709 | * blk_init_queue() must be paired with a blk_cleanup_queue() call | ||
1710 | * when the block device is deactivated (such as at module unload). | ||
1711 | **/ | ||
1712 | |||
1713 | request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) | ||
1714 | { | ||
1715 | return blk_init_queue_node(rfn, lock, -1); | ||
1716 | } | ||
1717 | EXPORT_SYMBOL(blk_init_queue); | ||
1718 | |||
1719 | request_queue_t * | ||
1720 | blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | ||
1721 | { | ||
1722 | request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id); | ||
1723 | |||
1724 | if (!q) | ||
1725 | return NULL; | ||
1726 | |||
1727 | q->node = node_id; | ||
1728 | if (blk_init_free_list(q)) | ||
1729 | goto out_init; | ||
1730 | |||
1731 | /* | ||
1732 | * if caller didn't supply a lock, they get per-queue locking with | ||
1733 | * our embedded lock | ||
1734 | */ | ||
1735 | if (!lock) { | ||
1736 | spin_lock_init(&q->__queue_lock); | ||
1737 | lock = &q->__queue_lock; | ||
1738 | } | ||
1739 | |||
1740 | q->request_fn = rfn; | ||
1741 | q->back_merge_fn = ll_back_merge_fn; | ||
1742 | q->front_merge_fn = ll_front_merge_fn; | ||
1743 | q->merge_requests_fn = ll_merge_requests_fn; | ||
1744 | q->prep_rq_fn = NULL; | ||
1745 | q->unplug_fn = generic_unplug_device; | ||
1746 | q->queue_flags = (1 << QUEUE_FLAG_CLUSTER); | ||
1747 | q->queue_lock = lock; | ||
1748 | |||
1749 | blk_queue_segment_boundary(q, 0xffffffff); | ||
1750 | |||
1751 | blk_queue_make_request(q, __make_request); | ||
1752 | blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE); | ||
1753 | |||
1754 | blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); | ||
1755 | blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); | ||
1756 | |||
1757 | /* | ||
1758 | * all done | ||
1759 | */ | ||
1760 | if (!elevator_init(q, NULL)) { | ||
1761 | blk_queue_congestion_threshold(q); | ||
1762 | return q; | ||
1763 | } | ||
1764 | |||
1765 | blk_cleanup_queue(q); | ||
1765 | return NULL; /* blk_cleanup_queue() has already freed q; avoid the double free below */ | ||
1766 | out_init: | ||
1767 | kmem_cache_free(requestq_cachep, q); | ||
1768 | return NULL; | ||
1769 | } | ||
1770 | EXPORT_SYMBOL(blk_init_queue_node); | ||
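/*
 * A minimal sketch of pairing blk_init_queue() with blk_cleanup_queue(), as
 * the kernel-doc above describes.  The mydev_* names are hypothetical and the
 * strategy routine is a placeholder (a fuller one appears in the sketch near
 * end_request() further down).
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static spinlock_t mydev_lock;
static request_queue_t *mydev_queue;

static void mydev_request_fn(request_queue_t *q)
{
	struct request *rq;

	/* placeholder: fail everything immediately */
	while ((rq = elv_next_request(q)) != NULL)
		end_request(rq, 0);
}

static int __init mydev_init(void)
{
	spin_lock_init(&mydev_lock);
	mydev_queue = blk_init_queue(mydev_request_fn, &mydev_lock);
	if (!mydev_queue)
		return -ENOMEM;
	return 0;
}

static void __exit mydev_exit(void)
{
	blk_cleanup_queue(mydev_queue);	/* drops the reference taken at init */
}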
1771 | |||
1772 | int blk_get_queue(request_queue_t *q) | ||
1773 | { | ||
1774 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | ||
1775 | atomic_inc(&q->refcnt); | ||
1776 | return 0; | ||
1777 | } | ||
1778 | |||
1779 | return 1; | ||
1780 | } | ||
1781 | |||
1782 | EXPORT_SYMBOL(blk_get_queue); | ||
1783 | |||
1784 | static inline void blk_free_request(request_queue_t *q, struct request *rq) | ||
1785 | { | ||
1786 | if (rq->flags & REQ_ELVPRIV) | ||
1787 | elv_put_request(q, rq); | ||
1788 | mempool_free(rq, q->rq.rq_pool); | ||
1789 | } | ||
1790 | |||
1791 | static inline struct request * | ||
1792 | blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, | ||
1793 | int priv, gfp_t gfp_mask) | ||
1794 | { | ||
1795 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); | ||
1796 | |||
1797 | if (!rq) | ||
1798 | return NULL; | ||
1799 | |||
1800 | /* | ||
1801 | * first three bits are identical in rq->flags and bio->bi_rw, | ||
1802 | * see bio.h and blkdev.h | ||
1803 | */ | ||
1804 | rq->flags = rw; | ||
1805 | |||
1806 | if (priv) { | ||
1807 | if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) { | ||
1808 | mempool_free(rq, q->rq.rq_pool); | ||
1809 | return NULL; | ||
1810 | } | ||
1811 | rq->flags |= REQ_ELVPRIV; | ||
1812 | } | ||
1813 | |||
1814 | return rq; | ||
1815 | } | ||
1816 | |||
1817 | /* | ||
1818 | * ioc_batching returns true if the ioc is a valid batching io context and | ||
1819 | * its owner should be given priority access to a request. | ||
1820 | */ | ||
1821 | static inline int ioc_batching(request_queue_t *q, struct io_context *ioc) | ||
1822 | { | ||
1823 | if (!ioc) | ||
1824 | return 0; | ||
1825 | |||
1826 | /* | ||
1827 | * Make sure the process is able to allocate at least 1 request | ||
1828 | * even if the batch times out, otherwise we could theoretically | ||
1829 | * lose wakeups. | ||
1830 | */ | ||
1831 | return ioc->nr_batch_requests == q->nr_batching || | ||
1832 | (ioc->nr_batch_requests > 0 | ||
1833 | && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); | ||
1834 | } | ||
1835 | |||
1836 | /* | ||
1837 | * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This | ||
1838 | * will cause the process to be a "batcher" on all queues in the system. This | ||
1839 | * is the behaviour we want though - once it gets a wakeup it should be given | ||
1840 | * a nice run. | ||
1841 | */ | ||
1842 | static void ioc_set_batching(request_queue_t *q, struct io_context *ioc) | ||
1843 | { | ||
1844 | if (!ioc || ioc_batching(q, ioc)) | ||
1845 | return; | ||
1846 | |||
1847 | ioc->nr_batch_requests = q->nr_batching; | ||
1848 | ioc->last_waited = jiffies; | ||
1849 | } | ||
1850 | |||
1851 | static void __freed_request(request_queue_t *q, int rw) | ||
1852 | { | ||
1853 | struct request_list *rl = &q->rq; | ||
1854 | |||
1855 | if (rl->count[rw] < queue_congestion_off_threshold(q)) | ||
1856 | clear_queue_congested(q, rw); | ||
1857 | |||
1858 | if (rl->count[rw] + 1 <= q->nr_requests) { | ||
1859 | if (waitqueue_active(&rl->wait[rw])) | ||
1860 | wake_up(&rl->wait[rw]); | ||
1861 | |||
1862 | blk_clear_queue_full(q, rw); | ||
1863 | } | ||
1864 | } | ||
1865 | |||
1866 | /* | ||
1867 | * A request has just been released. Account for it, update the full and | ||
1868 | * congestion status, wake up any waiters. Called under q->queue_lock. | ||
1869 | */ | ||
1870 | static void freed_request(request_queue_t *q, int rw, int priv) | ||
1871 | { | ||
1872 | struct request_list *rl = &q->rq; | ||
1873 | |||
1874 | rl->count[rw]--; | ||
1875 | if (priv) | ||
1876 | rl->elvpriv--; | ||
1877 | |||
1878 | __freed_request(q, rw); | ||
1879 | |||
1880 | if (unlikely(rl->starved[rw ^ 1])) | ||
1881 | __freed_request(q, rw ^ 1); | ||
1882 | } | ||
1883 | |||
1884 | #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist) | ||
1885 | /* | ||
1886 | * Get a free request, queue_lock must be held. | ||
1887 | * Returns NULL on failure, with queue_lock held. | ||
1888 | * Returns !NULL on success, with queue_lock *not held*. | ||
1889 | */ | ||
1890 | static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, | ||
1891 | gfp_t gfp_mask) | ||
1892 | { | ||
1893 | struct request *rq = NULL; | ||
1894 | struct request_list *rl = &q->rq; | ||
1895 | struct io_context *ioc = current_io_context(GFP_ATOMIC); | ||
1896 | int priv; | ||
1897 | |||
1898 | if (rl->count[rw]+1 >= q->nr_requests) { | ||
1899 | /* | ||
1900 | * The queue will fill after this allocation, so set it as | ||
1901 | * full, and mark this process as "batching". This process | ||
1902 | * will be allowed to complete a batch of requests, others | ||
1903 | * will be blocked. | ||
1904 | */ | ||
1905 | if (!blk_queue_full(q, rw)) { | ||
1906 | ioc_set_batching(q, ioc); | ||
1907 | blk_set_queue_full(q, rw); | ||
1908 | } | ||
1909 | } | ||
1910 | |||
1911 | switch (elv_may_queue(q, rw, bio)) { | ||
1912 | case ELV_MQUEUE_NO: | ||
1913 | goto rq_starved; | ||
1914 | case ELV_MQUEUE_MAY: | ||
1915 | break; | ||
1916 | case ELV_MQUEUE_MUST: | ||
1917 | goto get_rq; | ||
1918 | } | ||
1919 | |||
1920 | if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) { | ||
1921 | /* | ||
1922 | * The queue is full and the allocating process is not a | ||
1923 | * "batcher", and not exempted by the IO scheduler | ||
1924 | */ | ||
1925 | goto out; | ||
1926 | } | ||
1927 | |||
1928 | get_rq: | ||
1929 | /* | ||
1930 | * Only allow batching queuers to allocate up to 50% over the defined | ||
1931 | * limit of requests, otherwise we could have thousands of requests | ||
1932 | * allocated with any setting of ->nr_requests | ||
1933 | */ | ||
1934 | if (rl->count[rw] >= (3 * q->nr_requests / 2)) | ||
1935 | goto out; | ||
1936 | |||
1937 | rl->count[rw]++; | ||
1938 | rl->starved[rw] = 0; | ||
1939 | if (rl->count[rw] >= queue_congestion_on_threshold(q)) | ||
1940 | set_queue_congested(q, rw); | ||
1941 | |||
1942 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | ||
1943 | if (priv) | ||
1944 | rl->elvpriv++; | ||
1945 | |||
1946 | spin_unlock_irq(q->queue_lock); | ||
1947 | |||
1948 | rq = blk_alloc_request(q, rw, bio, priv, gfp_mask); | ||
1949 | if (!rq) { | ||
1950 | /* | ||
1951 | * Allocation failed presumably due to memory. Undo anything | ||
1952 | * we might have messed up. | ||
1953 | * | ||
1954 | * Allocating task should really be put onto the front of the | ||
1955 | * wait queue, but this is pretty rare. | ||
1956 | */ | ||
1957 | spin_lock_irq(q->queue_lock); | ||
1958 | freed_request(q, rw, priv); | ||
1959 | |||
1960 | /* | ||
1961 | * in the very unlikely event that allocation failed and no | ||
1962 | * requests for this direction were pending, mark us starved | ||
1963 | * so that freeing of a request in the other direction will | ||
1964 | * notice us. another possible fix would be to split the | ||
1965 | * rq mempool into READ and WRITE | ||
1966 | */ | ||
1967 | rq_starved: | ||
1968 | if (unlikely(rl->count[rw] == 0)) | ||
1969 | rl->starved[rw] = 1; | ||
1970 | |||
1971 | goto out; | ||
1972 | } | ||
1973 | |||
1974 | if (ioc_batching(q, ioc)) | ||
1975 | ioc->nr_batch_requests--; | ||
1976 | |||
1977 | rq_init(q, rq); | ||
1978 | rq->rl = rl; | ||
1979 | out: | ||
1980 | return rq; | ||
1981 | } | ||
1982 | |||
1983 | /* | ||
1984 | * No available requests for this queue, unplug the device and wait for some | ||
1985 | * requests to become available. | ||
1986 | * | ||
1987 | * Called with q->queue_lock held, and returns with it unlocked. | ||
1988 | */ | ||
1989 | static struct request *get_request_wait(request_queue_t *q, int rw, | ||
1990 | struct bio *bio) | ||
1991 | { | ||
1992 | struct request *rq; | ||
1993 | |||
1994 | rq = get_request(q, rw, bio, GFP_NOIO); | ||
1995 | while (!rq) { | ||
1996 | DEFINE_WAIT(wait); | ||
1997 | struct request_list *rl = &q->rq; | ||
1998 | |||
1999 | prepare_to_wait_exclusive(&rl->wait[rw], &wait, | ||
2000 | TASK_UNINTERRUPTIBLE); | ||
2001 | |||
2002 | rq = get_request(q, rw, bio, GFP_NOIO); | ||
2003 | |||
2004 | if (!rq) { | ||
2005 | struct io_context *ioc; | ||
2006 | |||
2007 | __generic_unplug_device(q); | ||
2008 | spin_unlock_irq(q->queue_lock); | ||
2009 | io_schedule(); | ||
2010 | |||
2011 | /* | ||
2012 | * After sleeping, we become a "batching" process and | ||
2013 | * will be able to allocate at least one request, and | ||
2014 | * up to a big batch of them for a small period of time. | ||
2015 | * See ioc_batching, ioc_set_batching | ||
2016 | */ | ||
2017 | ioc = current_io_context(GFP_NOIO); | ||
2018 | ioc_set_batching(q, ioc); | ||
2019 | |||
2020 | spin_lock_irq(q->queue_lock); | ||
2021 | } | ||
2022 | finish_wait(&rl->wait[rw], &wait); | ||
2023 | } | ||
2024 | |||
2025 | return rq; | ||
2026 | } | ||
2027 | |||
2028 | struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) | ||
2029 | { | ||
2030 | struct request *rq; | ||
2031 | |||
2032 | BUG_ON(rw != READ && rw != WRITE); | ||
2033 | |||
2034 | spin_lock_irq(q->queue_lock); | ||
2035 | if (gfp_mask & __GFP_WAIT) { | ||
2036 | rq = get_request_wait(q, rw, NULL); | ||
2037 | } else { | ||
2038 | rq = get_request(q, rw, NULL, gfp_mask); | ||
2039 | if (!rq) | ||
2040 | spin_unlock_irq(q->queue_lock); | ||
2041 | } | ||
2042 | /* q->queue_lock is unlocked at this point */ | ||
2043 | |||
2044 | return rq; | ||
2045 | } | ||
2046 | EXPORT_SYMBOL(blk_get_request); | ||
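/*
 * A small sketch of the two allocation modes blk_get_request() supports: with
 * __GFP_WAIT the call may sleep but does not return NULL, without it the
 * caller must cope with failure.  mydev_get_rq() is a hypothetical wrapper.
 */
#include <linux/blkdev.h>

static struct request *mydev_get_rq(request_queue_t *q, int can_sleep)
{
	struct request *rq;

	if (can_sleep)
		rq = blk_get_request(q, WRITE, GFP_KERNEL);	/* may sleep, won't fail */
	else
		rq = blk_get_request(q, WRITE, GFP_ATOMIC);	/* may return NULL */

	return rq;	/* the caller must either queue it or blk_put_request() it */
}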
2047 | |||
2048 | /** | ||
2049 | * blk_requeue_request - put a request back on queue | ||
2050 | * @q: request queue where request should be inserted | ||
2051 | * @rq: request to be inserted | ||
2052 | * | ||
2053 | * Description: | ||
2054 | * Drivers often keep queueing requests until the hardware cannot accept | ||
2055 | * more. When that condition happens we need to put the request back | ||
2056 | * on the queue. Must be called with queue lock held. | ||
2057 | */ | ||
2058 | void blk_requeue_request(request_queue_t *q, struct request *rq) | ||
2059 | { | ||
2060 | if (blk_rq_tagged(rq)) | ||
2061 | blk_queue_end_tag(q, rq); | ||
2062 | |||
2063 | elv_requeue_request(q, rq); | ||
2064 | } | ||
2065 | |||
2066 | EXPORT_SYMBOL(blk_requeue_request); | ||
2067 | |||
2068 | /** | ||
2069 | * blk_insert_request - insert a special request in to a request queue | ||
2070 | * @q: request queue where request should be inserted | ||
2071 | * @rq: request to be inserted | ||
2072 | * @at_head: insert request at head or tail of queue | ||
2073 | * @data: private data | ||
2074 | * | ||
2075 | * Description: | ||
2076 | * Many block devices need to execute commands asynchronously, so they don't | ||
2077 | * block the whole kernel from preemption during request execution. This is | ||
2078 | * normally accomplished by inserting artificial requests tagged as | ||
2079 | * REQ_SPECIAL into the corresponding request queue, and letting them be | ||
2080 | * scheduled for actual execution by the request queue. | ||
2081 | * | ||
2082 | * We have the option of inserting the head or the tail of the queue. | ||
2083 | * Typically we use the tail for new ioctls and so forth. We use the head | ||
2084 | * of the queue for things like a QUEUE_FULL message from a device, or a | ||
2085 | * host that is unable to accept a particular command. | ||
2086 | */ | ||
2087 | void blk_insert_request(request_queue_t *q, struct request *rq, | ||
2088 | int at_head, void *data) | ||
2089 | { | ||
2090 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | ||
2091 | unsigned long flags; | ||
2092 | |||
2093 | /* | ||
2094 | * tell I/O scheduler that this isn't a regular read/write (ie it | ||
2095 | * must not attempt merges on this) and that it acts as a soft | ||
2096 | * barrier | ||
2097 | */ | ||
2098 | rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER; | ||
2099 | |||
2100 | rq->special = data; | ||
2101 | |||
2102 | spin_lock_irqsave(q->queue_lock, flags); | ||
2103 | |||
2104 | /* | ||
2105 | * If command is tagged, release the tag | ||
2106 | */ | ||
2107 | if (blk_rq_tagged(rq)) | ||
2108 | blk_queue_end_tag(q, rq); | ||
2109 | |||
2110 | drive_stat_acct(rq, rq->nr_sectors, 1); | ||
2111 | __elv_add_request(q, rq, where, 0); | ||
2112 | |||
2113 | if (blk_queue_plugged(q)) | ||
2114 | __generic_unplug_device(q); | ||
2115 | else | ||
2116 | q->request_fn(q); | ||
2117 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2118 | } | ||
2119 | |||
2120 | EXPORT_SYMBOL(blk_insert_request); | ||
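/*
 * A sketch of queueing a driver-private command with blk_insert_request().
 * struct mydev_cmd and the REQ_SPECIAL handling shown in the trailing comment
 * are hypothetical, but follow the description above.
 */
#include <linux/blkdev.h>

struct mydev_cmd {
	int opcode;	/* hypothetical driver-private payload */
};

static int mydev_queue_special(request_queue_t *q, struct mydev_cmd *cmd)
{
	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);

	if (!rq)
		return -ENOMEM;

	/* blk_insert_request() marks rq REQ_SPECIAL and stores cmd in rq->special */
	blk_insert_request(q, rq, 1, cmd);
	return 0;
}

/*
 * The driver's request_fn then picks the command out again, e.g.:
 *
 *	if (rq->flags & REQ_SPECIAL)
 *		mydev_handle_cmd(rq->special);
 */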
2121 | |||
2122 | /** | ||
2123 | * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage | ||
2124 | * @q: request queue where request should be inserted | ||
2125 | * @rq: request structure to fill | ||
2126 | * @ubuf: the user buffer | ||
2127 | * @len: length of user data | ||
2128 | * | ||
2129 | * Description: | ||
2130 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
2131 | * a kernel bounce buffer is used. | ||
2132 | * | ||
2133 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
2134 | * still in process context. | ||
2135 | * | ||
2136 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
2137 | * before being submitted to the device, as pages mapped may be out of | ||
2138 | * reach. It's the caller's responsibility to make sure this happens. The | ||
2139 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
2140 | * unmapping. | ||
2141 | */ | ||
2142 | int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, | ||
2143 | unsigned int len) | ||
2144 | { | ||
2145 | unsigned long uaddr; | ||
2146 | struct bio *bio; | ||
2147 | int reading; | ||
2148 | |||
2149 | if (len > (q->max_sectors << 9)) | ||
2150 | return -EINVAL; | ||
2151 | if (!len || !ubuf) | ||
2152 | return -EINVAL; | ||
2153 | |||
2154 | reading = rq_data_dir(rq) == READ; | ||
2155 | |||
2156 | /* | ||
2157 | * if alignment requirement is satisfied, map in user pages for | ||
2158 | * direct dma. else, set up kernel bounce buffers | ||
2159 | */ | ||
2160 | uaddr = (unsigned long) ubuf; | ||
2161 | if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) | ||
2162 | bio = bio_map_user(q, NULL, uaddr, len, reading); | ||
2163 | else | ||
2164 | bio = bio_copy_user(q, uaddr, len, reading); | ||
2165 | |||
2166 | if (!IS_ERR(bio)) { | ||
2167 | rq->bio = rq->biotail = bio; | ||
2168 | blk_rq_bio_prep(q, rq, bio); | ||
2169 | |||
2170 | rq->buffer = rq->data = NULL; | ||
2171 | rq->data_len = len; | ||
2172 | return 0; | ||
2173 | } | ||
2174 | |||
2175 | /* | ||
2176 | * bio is the err-ptr | ||
2177 | */ | ||
2178 | return PTR_ERR(bio); | ||
2179 | } | ||
2180 | |||
2181 | EXPORT_SYMBOL(blk_rq_map_user); | ||
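/*
 * A sketch of the map/execute/unmap sequence described above for a
 * pass-through (REQ_BLOCK_PC) request.  CDB and sense setup are device
 * specific and omitted, as is the blk_queue_bounce() caveat from the
 * kernel-doc; mydev_pc_io() is a hypothetical wrapper.
 */
#include <linux/blkdev.h>
#include <linux/bio.h>

static int mydev_pc_io(request_queue_t *q, struct gendisk *disk,
		       void __user *ubuf, unsigned int len, int to_dev)
{
	struct request *rq;
	struct bio *bio;
	int err;

	rq = blk_get_request(q, to_dev ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->flags |= REQ_BLOCK_PC;

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		goto out;

	bio = rq->bio;			/* keep the original bio for unmapping */
	err = blk_execute_rq(q, disk, rq, 0);
	blk_rq_unmap_user(bio, len);
out:
	blk_put_request(rq);
	return err;
}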
2182 | |||
2183 | /** | ||
2184 | * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage | ||
2185 | * @q: request queue where request should be inserted | ||
2186 | * @rq: request to map data to | ||
2187 | * @iov: pointer to the iovec | ||
2188 | * @iov_count: number of elements in the iovec | ||
2189 | * | ||
2190 | * Description: | ||
2191 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
2192 | * a kernel bounce buffer is used. | ||
2193 | * | ||
2194 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
2195 | * still in process context. | ||
2196 | * | ||
2197 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
2198 | * before being submitted to the device, as pages mapped may be out of | ||
2199 | * reach. It's the caller's responsibility to make sure this happens. The | ||
2200 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
2201 | * unmapping. | ||
2202 | */ | ||
2203 | int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, | ||
2204 | struct sg_iovec *iov, int iov_count) | ||
2205 | { | ||
2206 | struct bio *bio; | ||
2207 | |||
2208 | if (!iov || iov_count <= 0) | ||
2209 | return -EINVAL; | ||
2210 | |||
2211 | /* we don't allow misaligned data like bio_map_user() does. If the | ||
2212 | * user is using sg, they're expected to know the alignment constraints | ||
2213 | * and respect them accordingly */ | ||
2214 | bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ); | ||
2215 | if (IS_ERR(bio)) | ||
2216 | return PTR_ERR(bio); | ||
2217 | |||
2218 | rq->bio = rq->biotail = bio; | ||
2219 | blk_rq_bio_prep(q, rq, bio); | ||
2220 | rq->buffer = rq->data = NULL; | ||
2221 | rq->data_len = bio->bi_size; | ||
2222 | return 0; | ||
2223 | } | ||
2224 | |||
2225 | EXPORT_SYMBOL(blk_rq_map_user_iov); | ||
2226 | |||
2227 | /** | ||
2228 | * blk_rq_unmap_user - unmap a request with user data | ||
2229 | * @bio: bio to be unmapped | ||
2230 | * @ulen: length of user buffer | ||
2231 | * | ||
2232 | * Description: | ||
2233 | * Unmap a bio previously mapped by blk_rq_map_user(). | ||
2234 | */ | ||
2235 | int blk_rq_unmap_user(struct bio *bio, unsigned int ulen) | ||
2236 | { | ||
2237 | int ret = 0; | ||
2238 | |||
2239 | if (bio) { | ||
2240 | if (bio_flagged(bio, BIO_USER_MAPPED)) | ||
2241 | bio_unmap_user(bio); | ||
2242 | else | ||
2243 | ret = bio_uncopy_user(bio); | ||
2244 | } | ||
2245 | |||
2246 | return ret; | ||
2247 | } | ||
2248 | |||
2249 | EXPORT_SYMBOL(blk_rq_unmap_user); | ||
2250 | |||
2251 | /** | ||
2252 | * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage | ||
2253 | * @q: request queue where request should be inserted | ||
2254 | * @rq: request to fill | ||
2255 | * @kbuf: the kernel buffer | ||
2256 | * @len: length of user data | ||
2257 | * @gfp_mask: memory allocation flags | ||
2258 | */ | ||
2259 | int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, | ||
2260 | unsigned int len, gfp_t gfp_mask) | ||
2261 | { | ||
2262 | struct bio *bio; | ||
2263 | |||
2264 | if (len > (q->max_sectors << 9)) | ||
2265 | return -EINVAL; | ||
2266 | if (!len || !kbuf) | ||
2267 | return -EINVAL; | ||
2268 | |||
2269 | bio = bio_map_kern(q, kbuf, len, gfp_mask); | ||
2270 | if (IS_ERR(bio)) | ||
2271 | return PTR_ERR(bio); | ||
2272 | |||
2273 | if (rq_data_dir(rq) == WRITE) | ||
2274 | bio->bi_rw |= (1 << BIO_RW); | ||
2275 | |||
2276 | rq->bio = rq->biotail = bio; | ||
2277 | blk_rq_bio_prep(q, rq, bio); | ||
2278 | |||
2279 | rq->buffer = rq->data = NULL; | ||
2280 | rq->data_len = len; | ||
2281 | return 0; | ||
2282 | } | ||
2283 | |||
2284 | EXPORT_SYMBOL(blk_rq_map_kern); | ||
2285 | |||
2286 | /** | ||
2287 | * blk_execute_rq_nowait - insert a request into queue for execution | ||
2288 | * @q: queue to insert the request in | ||
2289 | * @bd_disk: matching gendisk | ||
2290 | * @rq: request to insert | ||
2291 | * @at_head: insert request at head or tail of queue | ||
2292 | * @done: I/O completion handler | ||
2293 | * | ||
2294 | * Description: | ||
2295 | * Insert a fully prepared request at the back of the io scheduler queue | ||
2296 | * for execution. Don't wait for completion. | ||
2297 | */ | ||
2298 | void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, | ||
2299 | struct request *rq, int at_head, | ||
2300 | void (*done)(struct request *)) | ||
2301 | { | ||
2302 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | ||
2303 | |||
2304 | rq->rq_disk = bd_disk; | ||
2305 | rq->flags |= REQ_NOMERGE; | ||
2306 | rq->end_io = done; | ||
2307 | elv_add_request(q, rq, where, 1); | ||
2308 | generic_unplug_device(q); | ||
2309 | } | ||
2310 | |||
2311 | /** | ||
2312 | * blk_execute_rq - insert a request into queue for execution | ||
2313 | * @q: queue to insert the request in | ||
2314 | * @bd_disk: matching gendisk | ||
2315 | * @rq: request to insert | ||
2316 | * @at_head: insert request at head or tail of queue | ||
2317 | * | ||
2318 | * Description: | ||
2319 | * Insert a fully prepared request at the back of the io scheduler queue | ||
2320 | * for execution and wait for completion. | ||
2321 | */ | ||
2322 | int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, | ||
2323 | struct request *rq, int at_head) | ||
2324 | { | ||
2325 | DECLARE_COMPLETION(wait); | ||
2326 | char sense[SCSI_SENSE_BUFFERSIZE]; | ||
2327 | int err = 0; | ||
2328 | |||
2329 | /* | ||
2330 | * we need an extra reference to the request, so we can look at | ||
2331 | * it after io completion | ||
2332 | */ | ||
2333 | rq->ref_count++; | ||
2334 | |||
2335 | if (!rq->sense) { | ||
2336 | memset(sense, 0, sizeof(sense)); | ||
2337 | rq->sense = sense; | ||
2338 | rq->sense_len = 0; | ||
2339 | } | ||
2340 | |||
2341 | rq->waiting = &wait; | ||
2342 | blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); | ||
2343 | wait_for_completion(&wait); | ||
2344 | rq->waiting = NULL; | ||
2345 | |||
2346 | if (rq->errors) | ||
2347 | err = -EIO; | ||
2348 | |||
2349 | return err; | ||
2350 | } | ||
2351 | |||
2352 | EXPORT_SYMBOL(blk_execute_rq); | ||
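/*
 * A sketch of a synchronous pass-through command on a kernel buffer,
 * combining blk_rq_map_kern() with blk_execute_rq() as documented above.
 * CDB/sense setup is device specific and omitted; mydev_sync_cmd() is a
 * hypothetical wrapper.
 */
#include <linux/blkdev.h>

static int mydev_sync_cmd(request_queue_t *q, struct gendisk *disk,
			  void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->flags |= REQ_BLOCK_PC;

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);	/* -EIO if rq->errors was set */

	blk_put_request(rq);
	return err;
}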
2353 | |||
2354 | /** | ||
2355 | * blkdev_issue_flush - queue a flush | ||
2356 | * @bdev: blockdev to issue flush for | ||
2357 | * @error_sector: error sector | ||
2358 | * | ||
2359 | * Description: | ||
2360 | * Issue a flush for the block device in question. Caller can supply | ||
2361 | * room for storing the error offset in case of a flush error, if they | ||
2362 | * wish to. Caller must run wait_for_completion() on its own. | ||
2363 | */ | ||
2364 | int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) | ||
2365 | { | ||
2366 | request_queue_t *q; | ||
2367 | |||
2368 | if (bdev->bd_disk == NULL) | ||
2369 | return -ENXIO; | ||
2370 | |||
2371 | q = bdev_get_queue(bdev); | ||
2372 | if (!q) | ||
2373 | return -ENXIO; | ||
2374 | if (!q->issue_flush_fn) | ||
2375 | return -EOPNOTSUPP; | ||
2376 | |||
2377 | return q->issue_flush_fn(q, bdev->bd_disk, error_sector); | ||
2378 | } | ||
2379 | |||
2380 | EXPORT_SYMBOL(blkdev_issue_flush); | ||
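/*
 * A minimal sketch of using blkdev_issue_flush() to push a volatile write
 * cache out to media, e.g. after critical metadata has been written.  'bdev'
 * is assumed to be an already opened block device.
 */
#include <linux/blkdev.h>

static int mydev_flush_cache(struct block_device *bdev)
{
	sector_t error_sector;
	int err = blkdev_issue_flush(bdev, &error_sector);

	if (err == -EOPNOTSUPP)
		err = 0;	/* queue has no issue_flush_fn: nothing we can do */
	return err;
}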
2381 | |||
2382 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) | ||
2383 | { | ||
2384 | int rw = rq_data_dir(rq); | ||
2385 | |||
2386 | if (!blk_fs_request(rq) || !rq->rq_disk) | ||
2387 | return; | ||
2388 | |||
2389 | if (!new_io) { | ||
2390 | __disk_stat_inc(rq->rq_disk, merges[rw]); | ||
2391 | } else { | ||
2392 | disk_round_stats(rq->rq_disk); | ||
2393 | rq->rq_disk->in_flight++; | ||
2394 | } | ||
2395 | } | ||
2396 | |||
2397 | /* | ||
2398 | * add-request adds a request to the linked list. | ||
2399 | * queue lock is held and interrupts disabled, as we muck with the | ||
2400 | * request queue list. | ||
2401 | */ | ||
2402 | static inline void add_request(request_queue_t * q, struct request * req) | ||
2403 | { | ||
2404 | drive_stat_acct(req, req->nr_sectors, 1); | ||
2405 | |||
2406 | if (q->activity_fn) | ||
2407 | q->activity_fn(q->activity_data, rq_data_dir(req)); | ||
2408 | |||
2409 | /* | ||
2410 | * elevator indicated where it wants this request to be | ||
2411 | * inserted at elevator_merge time | ||
2412 | */ | ||
2413 | __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); | ||
2414 | } | ||
2415 | |||
2416 | /* | ||
2417 | * disk_round_stats() - Round off the performance stats on a struct | ||
2418 | * disk_stats. | ||
2419 | * | ||
2420 | * The average IO queue length and utilisation statistics are maintained | ||
2421 | * by observing the current state of the queue length and the amount of | ||
2422 | * time it has been in this state for. | ||
2423 | * | ||
2424 | * Normally, that accounting is done on IO completion, but that can result | ||
2425 | * in more than a second's worth of IO being accounted for within any one | ||
2426 | * second, leading to >100% utilisation. To deal with that, we call this | ||
2427 | * function to do a round-off before returning the results when reading | ||
2428 | * /proc/diskstats. This accounts immediately for all queue usage up to | ||
2429 | * the current jiffies and restarts the counters again. | ||
2430 | */ | ||
2431 | void disk_round_stats(struct gendisk *disk) | ||
2432 | { | ||
2433 | unsigned long now = jiffies; | ||
2434 | |||
2435 | if (now == disk->stamp) | ||
2436 | return; | ||
2437 | |||
2438 | if (disk->in_flight) { | ||
2439 | __disk_stat_add(disk, time_in_queue, | ||
2440 | disk->in_flight * (now - disk->stamp)); | ||
2441 | __disk_stat_add(disk, io_ticks, (now - disk->stamp)); | ||
2442 | } | ||
2443 | disk->stamp = now; | ||
2444 | } | ||
2445 | |||
2446 | /* | ||
2447 | * queue lock must be held | ||
2448 | */ | ||
2449 | static void __blk_put_request(request_queue_t *q, struct request *req) | ||
2450 | { | ||
2451 | struct request_list *rl = req->rl; | ||
2452 | |||
2453 | if (unlikely(!q)) | ||
2454 | return; | ||
2455 | if (unlikely(--req->ref_count)) | ||
2456 | return; | ||
2457 | |||
2458 | elv_completed_request(q, req); | ||
2459 | |||
2460 | req->rq_status = RQ_INACTIVE; | ||
2461 | req->rl = NULL; | ||
2462 | |||
2463 | /* | ||
2464 | * Request may not have originated from ll_rw_blk. If not, | ||
2465 | * it didn't come out of our reserved rq pools | ||
2466 | */ | ||
2467 | if (rl) { | ||
2468 | int rw = rq_data_dir(req); | ||
2469 | int priv = req->flags & REQ_ELVPRIV; | ||
2470 | |||
2471 | BUG_ON(!list_empty(&req->queuelist)); | ||
2472 | |||
2473 | blk_free_request(q, req); | ||
2474 | freed_request(q, rw, priv); | ||
2475 | } | ||
2476 | } | ||
2477 | |||
2478 | void blk_put_request(struct request *req) | ||
2479 | { | ||
2480 | unsigned long flags; | ||
2481 | request_queue_t *q = req->q; | ||
2482 | |||
2483 | /* | ||
2484 | * Gee, IDE calls in w/ NULL q. Fix IDE and remove the | ||
2485 | * following if (q) test. | ||
2486 | */ | ||
2487 | if (q) { | ||
2488 | spin_lock_irqsave(q->queue_lock, flags); | ||
2489 | __blk_put_request(q, req); | ||
2490 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2491 | } | ||
2492 | } | ||
2493 | |||
2494 | EXPORT_SYMBOL(blk_put_request); | ||
2495 | |||
2496 | /** | ||
2497 | * blk_end_sync_rq - executes a completion event on a request | ||
2498 | * @rq: request to complete | ||
2499 | */ | ||
2500 | void blk_end_sync_rq(struct request *rq) | ||
2501 | { | ||
2502 | struct completion *waiting = rq->waiting; | ||
2503 | |||
2504 | rq->waiting = NULL; | ||
2505 | __blk_put_request(rq->q, rq); | ||
2506 | |||
2507 | /* | ||
2508 | * complete last, if this is a stack request the process (and thus | ||
2509 | * the rq pointer) could be invalid right after this complete() | ||
2510 | */ | ||
2511 | complete(waiting); | ||
2512 | } | ||
2513 | EXPORT_SYMBOL(blk_end_sync_rq); | ||
2514 | |||
2515 | /** | ||
2516 | * blk_congestion_wait - wait for a queue to become uncongested | ||
2517 | * @rw: READ or WRITE | ||
2518 | * @timeout: timeout in jiffies | ||
2519 | * | ||
2520 | * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion. | ||
2521 | * If no queues are congested then just wait for the next request to be | ||
2522 | * returned. | ||
2523 | */ | ||
2524 | long blk_congestion_wait(int rw, long timeout) | ||
2525 | { | ||
2526 | long ret; | ||
2527 | DEFINE_WAIT(wait); | ||
2528 | wait_queue_head_t *wqh = &congestion_wqh[rw]; | ||
2529 | |||
2530 | prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); | ||
2531 | ret = io_schedule_timeout(timeout); | ||
2532 | finish_wait(wqh, &wait); | ||
2533 | return ret; | ||
2534 | } | ||
2535 | |||
2536 | EXPORT_SYMBOL(blk_congestion_wait); | ||
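/*
 * A sketch of the throttling pattern blk_congestion_wait() exists for: back
 * off briefly when write-out can make no progress.  mydev_writeout_some() is
 * a hypothetical stand-in for real page write-out.
 */
#include <linux/blkdev.h>
#include <linux/jiffies.h>

static int mydev_writeout_some(void)
{
	return 1;	/* stub: pretend one page was submitted */
}

static void mydev_writeout(int pages_left)
{
	while (pages_left > 0) {
		int done = mydev_writeout_some();

		if (done == 0) {
			/* no progress, queues are probably congested: sleep a bit */
			blk_congestion_wait(WRITE, HZ / 10);
			continue;
		}
		pages_left -= done;
	}
}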
2537 | |||
2538 | /* | ||
2539 | * Has to be called with the request spinlock acquired | ||
2540 | */ | ||
2541 | static int attempt_merge(request_queue_t *q, struct request *req, | ||
2542 | struct request *next) | ||
2543 | { | ||
2544 | if (!rq_mergeable(req) || !rq_mergeable(next)) | ||
2545 | return 0; | ||
2546 | |||
2547 | /* | ||
2548 | * not contiguous | ||
2549 | */ | ||
2550 | if (req->sector + req->nr_sectors != next->sector) | ||
2551 | return 0; | ||
2552 | |||
2553 | if (rq_data_dir(req) != rq_data_dir(next) | ||
2554 | || req->rq_disk != next->rq_disk | ||
2555 | || next->waiting || next->special) | ||
2556 | return 0; | ||
2557 | |||
2558 | /* | ||
2559 | * If we are allowed to merge, then append bio list | ||
2560 | * from next to rq and release next. merge_requests_fn | ||
2561 | * will have updated segment counts, update sector | ||
2562 | * counts here. | ||
2563 | */ | ||
2564 | if (!q->merge_requests_fn(q, req, next)) | ||
2565 | return 0; | ||
2566 | |||
2567 | /* | ||
2568 | * At this point we have either done a back merge | ||
2569 | * or front merge. We need the smaller start_time of | ||
2570 | * the merged requests to be the current request's start_time | ||
2571 | * for accounting purposes. | ||
2572 | */ | ||
2573 | if (time_after(req->start_time, next->start_time)) | ||
2574 | req->start_time = next->start_time; | ||
2575 | |||
2576 | req->biotail->bi_next = next->bio; | ||
2577 | req->biotail = next->biotail; | ||
2578 | |||
2579 | req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; | ||
2580 | |||
2581 | elv_merge_requests(q, req, next); | ||
2582 | |||
2583 | if (req->rq_disk) { | ||
2584 | disk_round_stats(req->rq_disk); | ||
2585 | req->rq_disk->in_flight--; | ||
2586 | } | ||
2587 | |||
2588 | req->ioprio = ioprio_best(req->ioprio, next->ioprio); | ||
2589 | |||
2590 | __blk_put_request(q, next); | ||
2591 | return 1; | ||
2592 | } | ||
2593 | |||
2594 | static inline int attempt_back_merge(request_queue_t *q, struct request *rq) | ||
2595 | { | ||
2596 | struct request *next = elv_latter_request(q, rq); | ||
2597 | |||
2598 | if (next) | ||
2599 | return attempt_merge(q, rq, next); | ||
2600 | |||
2601 | return 0; | ||
2602 | } | ||
2603 | |||
2604 | static inline int attempt_front_merge(request_queue_t *q, struct request *rq) | ||
2605 | { | ||
2606 | struct request *prev = elv_former_request(q, rq); | ||
2607 | |||
2608 | if (prev) | ||
2609 | return attempt_merge(q, prev, rq); | ||
2610 | |||
2611 | return 0; | ||
2612 | } | ||
2613 | |||
2614 | /** | ||
2615 | * blk_attempt_remerge - attempt to remerge active head with next request | ||
2616 | * @q: The &request_queue_t belonging to the device | ||
2617 | * @rq: The head request (usually) | ||
2618 | * | ||
2619 | * Description: | ||
2620 | * For head-active devices, the queue can easily be unplugged so quickly | ||
2621 | * that proper merging is not done on the front request. This may hurt | ||
2622 | * performance greatly for some devices. The block layer cannot safely | ||
2623 | * do merging on that first request for these queues, but the driver can | ||
2624 | * call this function and make it happen anyway. Only the driver knows | ||
2625 | * when it is safe to do so. | ||
2626 | **/ | ||
2627 | void blk_attempt_remerge(request_queue_t *q, struct request *rq) | ||
2628 | { | ||
2629 | unsigned long flags; | ||
2630 | |||
2631 | spin_lock_irqsave(q->queue_lock, flags); | ||
2632 | attempt_back_merge(q, rq); | ||
2633 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2634 | } | ||
2635 | |||
2636 | EXPORT_SYMBOL(blk_attempt_remerge); | ||
2637 | |||
2638 | static int __make_request(request_queue_t *q, struct bio *bio) | ||
2639 | { | ||
2640 | struct request *req; | ||
2641 | int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync; | ||
2642 | unsigned short prio; | ||
2643 | sector_t sector; | ||
2644 | |||
2645 | sector = bio->bi_sector; | ||
2646 | nr_sectors = bio_sectors(bio); | ||
2647 | cur_nr_sectors = bio_cur_sectors(bio); | ||
2648 | prio = bio_prio(bio); | ||
2649 | |||
2650 | rw = bio_data_dir(bio); | ||
2651 | sync = bio_sync(bio); | ||
2652 | |||
2653 | /* | ||
2654 | * low level driver can indicate that it wants pages above a | ||
2655 | * certain limit bounced to low memory (ie for highmem, or even | ||
2656 | * ISA dma in theory) | ||
2657 | */ | ||
2658 | blk_queue_bounce(q, &bio); | ||
2659 | |||
2660 | spin_lock_prefetch(q->queue_lock); | ||
2661 | |||
2662 | barrier = bio_barrier(bio); | ||
2663 | if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) { | ||
2664 | err = -EOPNOTSUPP; | ||
2665 | goto end_io; | ||
2666 | } | ||
2667 | |||
2668 | spin_lock_irq(q->queue_lock); | ||
2669 | |||
2670 | if (unlikely(barrier) || elv_queue_empty(q)) | ||
2671 | goto get_rq; | ||
2672 | |||
2673 | el_ret = elv_merge(q, &req, bio); | ||
2674 | switch (el_ret) { | ||
2675 | case ELEVATOR_BACK_MERGE: | ||
2676 | BUG_ON(!rq_mergeable(req)); | ||
2677 | |||
2678 | if (!q->back_merge_fn(q, req, bio)) | ||
2679 | break; | ||
2680 | |||
2681 | req->biotail->bi_next = bio; | ||
2682 | req->biotail = bio; | ||
2683 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | ||
2684 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
2685 | drive_stat_acct(req, nr_sectors, 0); | ||
2686 | if (!attempt_back_merge(q, req)) | ||
2687 | elv_merged_request(q, req); | ||
2688 | goto out; | ||
2689 | |||
2690 | case ELEVATOR_FRONT_MERGE: | ||
2691 | BUG_ON(!rq_mergeable(req)); | ||
2692 | |||
2693 | if (!q->front_merge_fn(q, req, bio)) | ||
2694 | break; | ||
2695 | |||
2696 | bio->bi_next = req->bio; | ||
2697 | req->bio = bio; | ||
2698 | |||
2699 | /* | ||
2700 | * may not be valid. if the low level driver said | ||
2701 | * it didn't need a bounce buffer then it better | ||
2702 | * not touch req->buffer either... | ||
2703 | */ | ||
2704 | req->buffer = bio_data(bio); | ||
2705 | req->current_nr_sectors = cur_nr_sectors; | ||
2706 | req->hard_cur_sectors = cur_nr_sectors; | ||
2707 | req->sector = req->hard_sector = sector; | ||
2708 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | ||
2709 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
2710 | drive_stat_acct(req, nr_sectors, 0); | ||
2711 | if (!attempt_front_merge(q, req)) | ||
2712 | elv_merged_request(q, req); | ||
2713 | goto out; | ||
2714 | |||
2715 | /* ELV_NO_MERGE: elevator says don't/can't merge. */ | ||
2716 | default: | ||
2717 | ; | ||
2718 | } | ||
2719 | |||
2720 | get_rq: | ||
2721 | /* | ||
2722 | * Grab a free request. This might sleep but cannot fail. | ||
2723 | * Returns with the queue unlocked. | ||
2724 | */ | ||
2725 | req = get_request_wait(q, rw, bio); | ||
2726 | |||
2727 | /* | ||
2728 | * After dropping the lock and possibly sleeping here, our request | ||
2729 | * may now be mergeable after it had proven unmergeable (above). | ||
2730 | * We don't worry about that case for efficiency. It won't happen | ||
2731 | * often, and the elevators are able to handle it. | ||
2732 | */ | ||
2733 | |||
2734 | req->flags |= REQ_CMD; | ||
2735 | |||
2736 | /* | ||
2737 | * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST) | ||
2738 | */ | ||
2739 | if (bio_rw_ahead(bio) || bio_failfast(bio)) | ||
2740 | req->flags |= REQ_FAILFAST; | ||
2741 | |||
2742 | /* | ||
2743 | * REQ_BARRIER implies no merging, but lets make it explicit | ||
2744 | */ | ||
2745 | if (unlikely(barrier)) | ||
2746 | req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); | ||
2747 | |||
2748 | req->errors = 0; | ||
2749 | req->hard_sector = req->sector = sector; | ||
2750 | req->hard_nr_sectors = req->nr_sectors = nr_sectors; | ||
2751 | req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors; | ||
2752 | req->nr_phys_segments = bio_phys_segments(q, bio); | ||
2753 | req->nr_hw_segments = bio_hw_segments(q, bio); | ||
2754 | req->buffer = bio_data(bio); /* see ->buffer comment above */ | ||
2755 | req->waiting = NULL; | ||
2756 | req->bio = req->biotail = bio; | ||
2757 | req->ioprio = prio; | ||
2758 | req->rq_disk = bio->bi_bdev->bd_disk; | ||
2759 | req->start_time = jiffies; | ||
2760 | |||
2761 | spin_lock_irq(q->queue_lock); | ||
2762 | if (elv_queue_empty(q)) | ||
2763 | blk_plug_device(q); | ||
2764 | add_request(q, req); | ||
2765 | out: | ||
2766 | if (sync) | ||
2767 | __generic_unplug_device(q); | ||
2768 | |||
2769 | spin_unlock_irq(q->queue_lock); | ||
2770 | return 0; | ||
2771 | |||
2772 | end_io: | ||
2773 | bio_endio(bio, nr_sectors << 9, err); | ||
2774 | return 0; | ||
2775 | } | ||
2776 | |||
2777 | /* | ||
2778 | * If bio->bi_dev is a partition, remap the location | ||
2779 | */ | ||
2780 | static inline void blk_partition_remap(struct bio *bio) | ||
2781 | { | ||
2782 | struct block_device *bdev = bio->bi_bdev; | ||
2783 | |||
2784 | if (bdev != bdev->bd_contains) { | ||
2785 | struct hd_struct *p = bdev->bd_part; | ||
2786 | const int rw = bio_data_dir(bio); | ||
2787 | |||
2788 | p->sectors[rw] += bio_sectors(bio); | ||
2789 | p->ios[rw]++; | ||
2790 | |||
2791 | bio->bi_sector += p->start_sect; | ||
2792 | bio->bi_bdev = bdev->bd_contains; | ||
2793 | } | ||
2794 | } | ||
2795 | |||
2796 | static void handle_bad_sector(struct bio *bio) | ||
2797 | { | ||
2798 | char b[BDEVNAME_SIZE]; | ||
2799 | |||
2800 | printk(KERN_INFO "attempt to access beyond end of device\n"); | ||
2801 | printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", | ||
2802 | bdevname(bio->bi_bdev, b), | ||
2803 | bio->bi_rw, | ||
2804 | (unsigned long long)bio->bi_sector + bio_sectors(bio), | ||
2805 | (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); | ||
2806 | |||
2807 | set_bit(BIO_EOF, &bio->bi_flags); | ||
2808 | } | ||
2809 | |||
2810 | /** | ||
2811 | * generic_make_request: hand a buffer to its device driver for I/O | ||
2812 | * @bio: The bio describing the location in memory and on the device. | ||
2813 | * | ||
2814 | * generic_make_request() is used to make I/O requests of block | ||
2815 | * devices. It is passed a &struct bio, which describes the I/O that needs | ||
2816 | * to be done. | ||
2817 | * | ||
2818 | * generic_make_request() does not return any status. The | ||
2819 | * success/failure status of the request, along with notification of | ||
2820 | * completion, is delivered asynchronously through the bio->bi_end_io | ||
2821 | * function described (one day) elsewhere. | ||
2822 | * | ||
2823 | * The caller of generic_make_request must make sure that bi_io_vec | ||
2824 | * are set to describe the memory buffer, and that bi_dev and bi_sector are | ||
2825 | * set to describe the device address, and the | ||
2826 | * bi_end_io and optionally bi_private are set to describe how | ||
2827 | * completion notification should be signaled. | ||
2828 | * | ||
2829 | * generic_make_request and the drivers it calls may use bi_next if this | ||
2830 | * bio happens to be merged with someone else, and may change bi_dev and | ||
2831 | * bi_sector for remaps as it sees fit. So the values of these fields | ||
2832 | * should NOT be depended on after the call to generic_make_request. | ||
2833 | */ | ||
2834 | void generic_make_request(struct bio *bio) | ||
2835 | { | ||
2836 | request_queue_t *q; | ||
2837 | sector_t maxsector; | ||
2838 | int ret, nr_sectors = bio_sectors(bio); | ||
2839 | |||
2840 | might_sleep(); | ||
2841 | /* Test device or partition size, when known. */ | ||
2842 | maxsector = bio->bi_bdev->bd_inode->i_size >> 9; | ||
2843 | if (maxsector) { | ||
2844 | sector_t sector = bio->bi_sector; | ||
2845 | |||
2846 | if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { | ||
2847 | /* | ||
2848 | * This may well happen - the kernel calls bread() | ||
2849 | * without checking the size of the device, e.g., when | ||
2850 | * mounting a device. | ||
2851 | */ | ||
2852 | handle_bad_sector(bio); | ||
2853 | goto end_io; | ||
2854 | } | ||
2855 | } | ||
2856 | |||
2857 | /* | ||
2858 | * Resolve the mapping until finished. (drivers are | ||
2859 | * still free to implement/resolve their own stacking | ||
2860 | * by explicitly returning 0) | ||
2861 | * | ||
2862 | * NOTE: we don't repeat the blk_size check for each new device. | ||
2863 | * Stacking drivers are expected to know what they are doing. | ||
2864 | */ | ||
2865 | do { | ||
2866 | char b[BDEVNAME_SIZE]; | ||
2867 | |||
2868 | q = bdev_get_queue(bio->bi_bdev); | ||
2869 | if (!q) { | ||
2870 | printk(KERN_ERR | ||
2871 | "generic_make_request: Trying to access " | ||
2872 | "nonexistent block-device %s (%Lu)\n", | ||
2873 | bdevname(bio->bi_bdev, b), | ||
2874 | (long long) bio->bi_sector); | ||
2875 | end_io: | ||
2876 | bio_endio(bio, bio->bi_size, -EIO); | ||
2877 | break; | ||
2878 | } | ||
2879 | |||
2880 | if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) { | ||
2881 | printk("bio too big device %s (%u > %u)\n", | ||
2882 | bdevname(bio->bi_bdev, b), | ||
2883 | bio_sectors(bio), | ||
2884 | q->max_hw_sectors); | ||
2885 | goto end_io; | ||
2886 | } | ||
2887 | |||
2888 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | ||
2889 | goto end_io; | ||
2890 | |||
2891 | /* | ||
2892 | * If this device has partitions, remap block n | ||
2893 | * of partition p to block n+start(p) of the disk. | ||
2894 | */ | ||
2895 | blk_partition_remap(bio); | ||
2896 | |||
2897 | ret = q->make_request_fn(q, bio); | ||
2898 | } while (ret); | ||
2899 | } | ||
2900 | |||
2901 | EXPORT_SYMBOL(generic_make_request); | ||
2902 | |||
2903 | /** | ||
2904 | * submit_bio: submit a bio to the block device layer for I/O | ||
2905 | * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) | ||
2906 | * @bio: The &struct bio which describes the I/O | ||
2907 | * | ||
2908 | * submit_bio() is very similar in purpose to generic_make_request(), and | ||
2909 | * uses that function to do most of the work. Both are fairly rough | ||
2910 | * interfaces; @bio must be set up and ready for I/O. | ||
2911 | * | ||
2912 | */ | ||
2913 | void submit_bio(int rw, struct bio *bio) | ||
2914 | { | ||
2915 | int count = bio_sectors(bio); | ||
2916 | |||
2917 | BIO_BUG_ON(!bio->bi_size); | ||
2918 | BIO_BUG_ON(!bio->bi_io_vec); | ||
2919 | bio->bi_rw |= rw; | ||
2920 | if (rw & WRITE) | ||
2921 | mod_page_state(pgpgout, count); | ||
2922 | else | ||
2923 | mod_page_state(pgpgin, count); | ||
2924 | |||
2925 | if (unlikely(block_dump)) { | ||
2926 | char b[BDEVNAME_SIZE]; | ||
2927 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", | ||
2928 | current->comm, current->pid, | ||
2929 | (rw & WRITE) ? "WRITE" : "READ", | ||
2930 | (unsigned long long)bio->bi_sector, | ||
2931 | bdevname(bio->bi_bdev,b)); | ||
2932 | } | ||
2933 | |||
2934 | generic_make_request(bio); | ||
2935 | } | ||
2936 | |||
2937 | EXPORT_SYMBOL(submit_bio); | ||
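/*
 * A sketch of driving generic_make_request() via submit_bio(): read one page
 * synchronously from a block device.  The completion-based waiting and the
 * mydev_* names are assumptions, not an interface this file defines.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/mm.h>

static int mydev_read_endio(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;			/* partial completion, not done yet */
	complete((struct completion *)bio->bi_private);
	return 0;
}

static int mydev_read_page(struct block_device *bdev, sector_t sector,
			   struct page *page)
{
	struct completion done;
	struct bio *bio;
	int err = 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	init_completion(&done);
	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = mydev_read_endio;
	bio->bi_private = &done;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EIO;
	}

	submit_bio(READ, bio);
	wait_for_completion(&done);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		err = -EIO;
	bio_put(bio);
	return err;
}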
2938 | |||
2939 | static void blk_recalc_rq_segments(struct request *rq) | ||
2940 | { | ||
2941 | struct bio *bio, *prevbio = NULL; | ||
2942 | int nr_phys_segs, nr_hw_segs; | ||
2943 | unsigned int phys_size, hw_size; | ||
2944 | request_queue_t *q = rq->q; | ||
2945 | |||
2946 | if (!rq->bio) | ||
2947 | return; | ||
2948 | |||
2949 | phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0; | ||
2950 | rq_for_each_bio(bio, rq) { | ||
2951 | /* Force bio hw/phys segs to be recalculated. */ | ||
2952 | bio->bi_flags &= ~(1 << BIO_SEG_VALID); | ||
2953 | |||
2954 | nr_phys_segs += bio_phys_segments(q, bio); | ||
2955 | nr_hw_segs += bio_hw_segments(q, bio); | ||
2956 | if (prevbio) { | ||
2957 | int pseg = phys_size + prevbio->bi_size + bio->bi_size; | ||
2958 | int hseg = hw_size + prevbio->bi_size + bio->bi_size; | ||
2959 | |||
2960 | if (blk_phys_contig_segment(q, prevbio, bio) && | ||
2961 | pseg <= q->max_segment_size) { | ||
2962 | nr_phys_segs--; | ||
2963 | phys_size += prevbio->bi_size + bio->bi_size; | ||
2964 | } else | ||
2965 | phys_size = 0; | ||
2966 | |||
2967 | if (blk_hw_contig_segment(q, prevbio, bio) && | ||
2968 | hseg <= q->max_segment_size) { | ||
2969 | nr_hw_segs--; | ||
2970 | hw_size += prevbio->bi_size + bio->bi_size; | ||
2971 | } else | ||
2972 | hw_size = 0; | ||
2973 | } | ||
2974 | prevbio = bio; | ||
2975 | } | ||
2976 | |||
2977 | rq->nr_phys_segments = nr_phys_segs; | ||
2978 | rq->nr_hw_segments = nr_hw_segs; | ||
2979 | } | ||
2980 | |||
2981 | static void blk_recalc_rq_sectors(struct request *rq, int nsect) | ||
2982 | { | ||
2983 | if (blk_fs_request(rq)) { | ||
2984 | rq->hard_sector += nsect; | ||
2985 | rq->hard_nr_sectors -= nsect; | ||
2986 | |||
2987 | /* | ||
2988 | * Move the I/O submission pointers ahead if required. | ||
2989 | */ | ||
2990 | if ((rq->nr_sectors >= rq->hard_nr_sectors) && | ||
2991 | (rq->sector <= rq->hard_sector)) { | ||
2992 | rq->sector = rq->hard_sector; | ||
2993 | rq->nr_sectors = rq->hard_nr_sectors; | ||
2994 | rq->hard_cur_sectors = bio_cur_sectors(rq->bio); | ||
2995 | rq->current_nr_sectors = rq->hard_cur_sectors; | ||
2996 | rq->buffer = bio_data(rq->bio); | ||
2997 | } | ||
2998 | |||
2999 | /* | ||
3000 | * if total number of sectors is less than the first segment | ||
3001 | * size, something has gone terribly wrong | ||
3002 | */ | ||
3003 | if (rq->nr_sectors < rq->current_nr_sectors) { | ||
3004 | printk("blk: request botched\n"); | ||
3005 | rq->nr_sectors = rq->current_nr_sectors; | ||
3006 | } | ||
3007 | } | ||
3008 | } | ||
3009 | |||
3010 | static int __end_that_request_first(struct request *req, int uptodate, | ||
3011 | int nr_bytes) | ||
3012 | { | ||
3013 | int total_bytes, bio_nbytes, error, next_idx = 0; | ||
3014 | struct bio *bio; | ||
3015 | |||
3016 | /* | ||
3017 | * extend uptodate bool to allow < 0 value to be direct io error | ||
3018 | */ | ||
3019 | error = 0; | ||
3020 | if (end_io_error(uptodate)) | ||
3021 | error = !uptodate ? -EIO : uptodate; | ||
3022 | |||
3023 | /* | ||
3024 | * for a REQ_BLOCK_PC request, we want to carry any eventual | ||
3025 | * sense key with us all the way through | ||
3026 | */ | ||
3027 | if (!blk_pc_request(req)) | ||
3028 | req->errors = 0; | ||
3029 | |||
3030 | if (!uptodate) { | ||
3031 | if (blk_fs_request(req) && !(req->flags & REQ_QUIET)) | ||
3032 | printk("end_request: I/O error, dev %s, sector %llu\n", | ||
3033 | req->rq_disk ? req->rq_disk->disk_name : "?", | ||
3034 | (unsigned long long)req->sector); | ||
3035 | } | ||
3036 | |||
3037 | if (blk_fs_request(req) && req->rq_disk) { | ||
3038 | const int rw = rq_data_dir(req); | ||
3039 | |||
3040 | __disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9); | ||
3041 | } | ||
3042 | |||
3043 | total_bytes = bio_nbytes = 0; | ||
3044 | while ((bio = req->bio) != NULL) { | ||
3045 | int nbytes; | ||
3046 | |||
3047 | if (nr_bytes >= bio->bi_size) { | ||
3048 | req->bio = bio->bi_next; | ||
3049 | nbytes = bio->bi_size; | ||
3050 | bio_endio(bio, nbytes, error); | ||
3051 | next_idx = 0; | ||
3052 | bio_nbytes = 0; | ||
3053 | } else { | ||
3054 | int idx = bio->bi_idx + next_idx; | ||
3055 | |||
3056 | if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { | ||
3057 | blk_dump_rq_flags(req, "__end_that"); | ||
3058 | printk("%s: bio idx %d >= vcnt %d\n", | ||
3059 | __FUNCTION__, | ||
3060 | bio->bi_idx, bio->bi_vcnt); | ||
3061 | break; | ||
3062 | } | ||
3063 | |||
3064 | nbytes = bio_iovec_idx(bio, idx)->bv_len; | ||
3065 | BIO_BUG_ON(nbytes > bio->bi_size); | ||
3066 | |||
3067 | /* | ||
3068 | * not a complete bvec done | ||
3069 | */ | ||
3070 | if (unlikely(nbytes > nr_bytes)) { | ||
3071 | bio_nbytes += nr_bytes; | ||
3072 | total_bytes += nr_bytes; | ||
3073 | break; | ||
3074 | } | ||
3075 | |||
3076 | /* | ||
3077 | * advance to the next vector | ||
3078 | */ | ||
3079 | next_idx++; | ||
3080 | bio_nbytes += nbytes; | ||
3081 | } | ||
3082 | |||
3083 | total_bytes += nbytes; | ||
3084 | nr_bytes -= nbytes; | ||
3085 | |||
3086 | if ((bio = req->bio)) { | ||
3087 | /* | ||
3088 | * end more in this run, or just return 'not-done' | ||
3089 | */ | ||
3090 | if (unlikely(nr_bytes <= 0)) | ||
3091 | break; | ||
3092 | } | ||
3093 | } | ||
3094 | |||
3095 | /* | ||
3096 | * completely done | ||
3097 | */ | ||
3098 | if (!req->bio) | ||
3099 | return 0; | ||
3100 | |||
3101 | /* | ||
3102 | * if the request wasn't completed, update state | ||
3103 | */ | ||
3104 | if (bio_nbytes) { | ||
3105 | bio_endio(bio, bio_nbytes, error); | ||
3106 | bio->bi_idx += next_idx; | ||
3107 | bio_iovec(bio)->bv_offset += nr_bytes; | ||
3108 | bio_iovec(bio)->bv_len -= nr_bytes; | ||
3109 | } | ||
3110 | |||
3111 | blk_recalc_rq_sectors(req, total_bytes >> 9); | ||
3112 | blk_recalc_rq_segments(req); | ||
3113 | return 1; | ||
3114 | } | ||
3115 | |||
3116 | /** | ||
3117 | * end_that_request_first - end I/O on a request | ||
3118 | * @req: the request being processed | ||
3119 | * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error | ||
3120 | * @nr_sectors: number of sectors to end I/O on | ||
3121 | * | ||
3122 | * Description: | ||
3123 | * Ends I/O on a number of sectors attached to @req, and sets it up | ||
3124 | * for the next range of segments (if any) in the cluster. | ||
3125 | * | ||
3126 | * Return: | ||
3127 | * 0 - we are done with this request, call end_that_request_last() | ||
3128 | * 1 - still buffers pending for this request | ||
3129 | **/ | ||
3130 | int end_that_request_first(struct request *req, int uptodate, int nr_sectors) | ||
3131 | { | ||
3132 | return __end_that_request_first(req, uptodate, nr_sectors << 9); | ||
3133 | } | ||
3134 | |||
3135 | EXPORT_SYMBOL(end_that_request_first); | ||
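/*
 * Illustrative sketch (not part of this file): the completion pattern that
 * end_that_request_first() is meant for, mirroring end_request() further
 * below.  The helper name my_complete_request() is hypothetical; both
 * blkdev_dequeue_request() and end_that_request_last() assume the queue
 * lock is held by the caller.
 */
#include <linux/blkdev.h>

static void my_complete_request(struct request *req, int uptodate,
				int nr_sectors)
{
	/* returns 1 while buffers are still pending, 0 when fully done */
	if (!end_that_request_first(req, uptodate, nr_sectors)) {
		blkdev_dequeue_request(req);
		end_that_request_last(req);
	}
}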
3136 | |||
3137 | /** | ||
3138 | * end_that_request_chunk - end I/O on a request | ||
3139 | * @req: the request being processed | ||
3140 | * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error | ||
3141 | * @nr_bytes: number of bytes to complete | ||
3142 | * | ||
3143 | * Description: | ||
3144 | * Ends I/O on a number of bytes attached to @req, and sets it up | ||
3145 | * for the next range of segments (if any). Like end_that_request_first(), | ||
3146 | * but deals with bytes instead of sectors. | ||
3147 | * | ||
3148 | * Return: | ||
3149 | * 0 - we are done with this request, call end_that_request_last() | ||
3150 | * 1 - still buffers pending for this request | ||
3151 | **/ | ||
3152 | int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes) | ||
3153 | { | ||
3154 | return __end_that_request_first(req, uptodate, nr_bytes); | ||
3155 | } | ||
3156 | |||
3157 | EXPORT_SYMBOL(end_that_request_chunk); | ||
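/*
 * Illustrative sketch (not part of this file): byte-granular completion via
 * end_that_request_chunk(), e.g. for a REQ_BLOCK_PC request where the
 * amount transferred need not be sector aligned.  my_finish_pc_request()
 * and bytes_done are hypothetical; the queue lock is assumed held.
 */
#include <linux/blkdev.h>

static void my_finish_pc_request(struct request *req, int uptodate,
				 unsigned int bytes_done)
{
	if (!end_that_request_chunk(req, uptodate, bytes_done)) {
		blkdev_dequeue_request(req);
		end_that_request_last(req);
	}
}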
3158 | |||
3159 | /* | ||
3160 | * queue lock must be held | ||
3161 | */ | ||
3162 | void end_that_request_last(struct request *req) | ||
3163 | { | ||
3164 | struct gendisk *disk = req->rq_disk; | ||
3165 | |||
3166 | if (unlikely(laptop_mode) && blk_fs_request(req)) | ||
3167 | laptop_io_completion(); | ||
3168 | |||
3169 | if (disk && blk_fs_request(req)) { | ||
3170 | unsigned long duration = jiffies - req->start_time; | ||
3171 | const int rw = rq_data_dir(req); | ||
3172 | |||
3173 | __disk_stat_inc(disk, ios[rw]); | ||
3174 | __disk_stat_add(disk, ticks[rw], duration); | ||
3175 | disk_round_stats(disk); | ||
3176 | disk->in_flight--; | ||
3177 | } | ||
3178 | if (req->end_io) | ||
3179 | req->end_io(req); | ||
3180 | else | ||
3181 | __blk_put_request(req->q, req); | ||
3182 | } | ||
3183 | |||
3184 | EXPORT_SYMBOL(end_that_request_last); | ||
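/*
 * Illustrative sketch (not part of this file): end_that_request_last()
 * requires the queue lock, so a caller that does not already hold it would
 * wrap the call roughly like this (hypothetical helper, assumes req->q is
 * valid):
 */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void my_end_request_locked(struct request *req)
{
	request_queue_t *q = req->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}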
3185 | |||
3186 | void end_request(struct request *req, int uptodate) | ||
3187 | { | ||
3188 | if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) { | ||
3189 | add_disk_randomness(req->rq_disk); | ||
3190 | blkdev_dequeue_request(req); | ||
3191 | end_that_request_last(req); | ||
3192 | } | ||
3193 | } | ||
3194 | |||
3195 | EXPORT_SYMBOL(end_request); | ||
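/*
 * Illustrative sketch (not part of this file): the classic request_fn loop
 * that ends in end_request().  The driver-side names my_request_fn() and
 * my_transfer() are hypothetical; elv_next_request() runs with the queue
 * lock held, which is also what end_request() expects.
 */
#include <linux/blkdev.h>

static int my_transfer(struct request *req)
{
	/* hypothetical: do the actual I/O for this segment, 1 on success */
	return 1;
}

static void my_request_fn(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(req)) {
			end_request(req, 0);	/* fail non-fs requests */
			continue;
		}
		end_request(req, my_transfer(req));
	}
}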
3196 | |||
3197 | void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) | ||
3198 | { | ||
3199 | /* first three bits are identical in rq->flags and bio->bi_rw */ | ||
3200 | rq->flags |= (bio->bi_rw & 7); | ||
3201 | |||
3202 | rq->nr_phys_segments = bio_phys_segments(q, bio); | ||
3203 | rq->nr_hw_segments = bio_hw_segments(q, bio); | ||
3204 | rq->current_nr_sectors = bio_cur_sectors(bio); | ||
3205 | rq->hard_cur_sectors = rq->current_nr_sectors; | ||
3206 | rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); | ||
3207 | rq->buffer = bio_data(bio); | ||
3208 | |||
3209 | rq->bio = rq->biotail = bio; | ||
3210 | } | ||
3211 | |||
3212 | EXPORT_SYMBOL(blk_rq_bio_prep); | ||
3213 | |||
3214 | int kblockd_schedule_work(struct work_struct *work) | ||
3215 | { | ||
3216 | return queue_work(kblockd_workqueue, work); | ||
3217 | } | ||
3218 | |||
3219 | EXPORT_SYMBOL(kblockd_schedule_work); | ||
3220 | |||
3221 | void kblockd_flush(void) | ||
3222 | { | ||
3223 | flush_workqueue(kblockd_workqueue); | ||
3224 | } | ||
3225 | EXPORT_SYMBOL(kblockd_flush); | ||
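/*
 * Illustrative sketch (not part of this file): deferring work to kblockd
 * with the pre-2.6.20 work_struct API used in this file (work functions
 * take a void *data argument).  my_unplug_fn, my_unplug_work and
 * my_kick_later are hypothetical names.
 */
#include <linux/workqueue.h>
#include <linux/blkdev.h>

static void my_unplug_fn(void *data)
{
	/* data is the third argument of DECLARE_WORK(); unused here */
}

static DECLARE_WORK(my_unplug_work, my_unplug_fn, NULL);

static void my_kick_later(void)
{
	kblockd_schedule_work(&my_unplug_work);
	/* kblockd_flush() would wait for all queued kblockd work to finish */
}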
3226 | |||
3227 | int __init blk_dev_init(void) | ||
3228 | { | ||
3229 | kblockd_workqueue = create_workqueue("kblockd"); | ||
3230 | if (!kblockd_workqueue) | ||
3231 | panic("Failed to create kblockd\n"); | ||
3232 | |||
3233 | request_cachep = kmem_cache_create("blkdev_requests", | ||
3234 | sizeof(struct request), 0, SLAB_PANIC, NULL, NULL); | ||
3235 | |||
3236 | requestq_cachep = kmem_cache_create("blkdev_queue", | ||
3237 | sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL); | ||
3238 | |||
3239 | iocontext_cachep = kmem_cache_create("blkdev_ioc", | ||
3240 | sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL); | ||
3241 | |||
3242 | blk_max_low_pfn = max_low_pfn; | ||
3243 | blk_max_pfn = max_pfn; | ||
3244 | |||
3245 | return 0; | ||
3246 | } | ||
3247 | |||
3248 | /* | ||
3249 | * IO Context helper functions | ||
3250 | */ | ||
3251 | void put_io_context(struct io_context *ioc) | ||
3252 | { | ||
3253 | if (ioc == NULL) | ||
3254 | return; | ||
3255 | |||
3256 | BUG_ON(atomic_read(&ioc->refcount) == 0); | ||
3257 | |||
3258 | if (atomic_dec_and_test(&ioc->refcount)) { | ||
3259 | if (ioc->aic && ioc->aic->dtor) | ||
3260 | ioc->aic->dtor(ioc->aic); | ||
3261 | if (ioc->cic && ioc->cic->dtor) | ||
3262 | ioc->cic->dtor(ioc->cic); | ||
3263 | |||
3264 | kmem_cache_free(iocontext_cachep, ioc); | ||
3265 | } | ||
3266 | } | ||
3267 | EXPORT_SYMBOL(put_io_context); | ||
3268 | |||
3269 | /* Called by the exiting task */ | ||

3270 | void exit_io_context(void) | ||
3271 | { | ||
3272 | unsigned long flags; | ||
3273 | struct io_context *ioc; | ||
3274 | |||
3275 | local_irq_save(flags); | ||
3276 | task_lock(current); | ||
3277 | ioc = current->io_context; | ||
3278 | current->io_context = NULL; | ||
3279 | ioc->task = NULL; | ||
3280 | task_unlock(current); | ||
3281 | local_irq_restore(flags); | ||
3282 | |||
3283 | if (ioc->aic && ioc->aic->exit) | ||
3284 | ioc->aic->exit(ioc->aic); | ||
3285 | if (ioc->cic && ioc->cic->exit) | ||
3286 | ioc->cic->exit(ioc->cic); | ||
3287 | |||
3288 | put_io_context(ioc); | ||
3289 | } | ||
3290 | |||
3291 | /* | ||
3292 | * If the current task has no IO context then create one and initialise it. | ||
3293 | * Otherwise, return its existing IO context. | ||
3294 | * | ||
3295 | * This returned IO context doesn't have a specifically elevated refcount, | ||
3296 | * but since the current task itself holds a reference, the context can be | ||
3297 | * used in general code, so long as it stays within `current` context. | ||
3298 | */ | ||
3299 | struct io_context *current_io_context(gfp_t gfp_flags) | ||
3300 | { | ||
3301 | struct task_struct *tsk = current; | ||
3302 | struct io_context *ret; | ||
3303 | |||
3304 | ret = tsk->io_context; | ||
3305 | if (likely(ret)) | ||
3306 | return ret; | ||
3307 | |||
3308 | ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); | ||
3309 | if (ret) { | ||
3310 | atomic_set(&ret->refcount, 1); | ||
3311 | ret->task = current; | ||
3312 | ret->set_ioprio = NULL; | ||
3313 | ret->last_waited = jiffies; /* doesn't matter... */ | ||
3314 | ret->nr_batch_requests = 0; /* because this is 0 */ | ||
3315 | ret->aic = NULL; | ||
3316 | ret->cic = NULL; | ||
3317 | tsk->io_context = ret; | ||
3318 | } | ||
3319 | |||
3320 | return ret; | ||
3321 | } | ||
3322 | EXPORT_SYMBOL(current_io_context); | ||
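/*
 * Illustrative sketch (not part of this file): current_io_context() hands
 * back the submitting task's io_context without taking a reference, so the
 * result may only be used while we remain in 'current' context and no
 * put_io_context() is needed.  The last_waited test below is merely an
 * example of per-task state a caller might consult.
 */
#include <linux/blkdev.h>
#include <linux/jiffies.h>

static int my_task_waited_recently(void)
{
	struct io_context *ioc = current_io_context(GFP_ATOMIC);

	if (!ioc)
		return 0;
	return time_before(jiffies, ioc->last_waited + HZ);
}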
3323 | |||
3324 | /* | ||
3325 | * If the current task has no IO context then create one and initialise it. | ||
3326 | * If it does have a context, take a ref on it. | ||
3327 | * | ||
3328 | * This is always called in the context of the task which submitted the I/O. | ||
3329 | */ | ||
3330 | struct io_context *get_io_context(gfp_t gfp_flags) | ||
3331 | { | ||
3332 | struct io_context *ret; | ||
3333 | ret = current_io_context(gfp_flags); | ||
3334 | if (likely(ret)) | ||
3335 | atomic_inc(&ret->refcount); | ||
3336 | return ret; | ||
3337 | } | ||
3338 | EXPORT_SYMBOL(get_io_context); | ||
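/*
 * Illustrative sketch (not part of this file): when the io_context is
 * stored away and used outside of 'current' (e.g. in per-queue scheduler
 * data), the reference taken by get_io_context() must be dropped again
 * with put_io_context().  struct my_data and the helpers are hypothetical.
 */
#include <linux/blkdev.h>

struct my_data {
	struct io_context *ioc;
};

static void my_attach_ioc(struct my_data *d)
{
	d->ioc = get_io_context(GFP_NOIO);	/* takes a reference */
}

static void my_detach_ioc(struct my_data *d)
{
	put_io_context(d->ioc);			/* NULL-safe */
	d->ioc = NULL;
}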
3339 | |||
3340 | void copy_io_context(struct io_context **pdst, struct io_context **psrc) | ||
3341 | { | ||
3342 | struct io_context *src = *psrc; | ||
3343 | struct io_context *dst = *pdst; | ||
3344 | |||
3345 | if (src) { | ||
3346 | BUG_ON(atomic_read(&src->refcount) == 0); | ||
3347 | atomic_inc(&src->refcount); | ||
3348 | put_io_context(dst); | ||
3349 | *pdst = src; | ||
3350 | } | ||
3351 | } | ||
3352 | EXPORT_SYMBOL(copy_io_context); | ||
3353 | |||
3354 | void swap_io_context(struct io_context **ioc1, struct io_context **ioc2) | ||
3355 | { | ||
3356 | struct io_context *temp; | ||
3357 | temp = *ioc1; | ||
3358 | *ioc1 = *ioc2; | ||
3359 | *ioc2 = temp; | ||
3360 | } | ||
3361 | EXPORT_SYMBOL(swap_io_context); | ||
3362 | |||
3363 | /* | ||
3364 | * sysfs parts below | ||
3365 | */ | ||
3366 | struct queue_sysfs_entry { | ||
3367 | struct attribute attr; | ||
3368 | ssize_t (*show)(struct request_queue *, char *); | ||
3369 | ssize_t (*store)(struct request_queue *, const char *, size_t); | ||
3370 | }; | ||
3371 | |||
3372 | static ssize_t | ||
3373 | queue_var_show(unsigned int var, char *page) | ||
3374 | { | ||
3375 | return sprintf(page, "%d\n", var); | ||
3376 | } | ||
3377 | |||
3378 | static ssize_t | ||
3379 | queue_var_store(unsigned long *var, const char *page, size_t count) | ||
3380 | { | ||
3381 | char *p = (char *) page; | ||
3382 | |||
3383 | *var = simple_strtoul(p, &p, 10); | ||
3384 | return count; | ||
3385 | } | ||
3386 | |||
3387 | static ssize_t queue_requests_show(struct request_queue *q, char *page) | ||
3388 | { | ||
3389 | return queue_var_show(q->nr_requests, (page)); | ||
3390 | } | ||
3391 | |||
3392 | static ssize_t | ||
3393 | queue_requests_store(struct request_queue *q, const char *page, size_t count) | ||
3394 | { | ||
3395 | struct request_list *rl = &q->rq; | ||
3396 | |||
3397 | int ret = queue_var_store(&q->nr_requests, page, count); | ||
3398 | if (q->nr_requests < BLKDEV_MIN_RQ) | ||
3399 | q->nr_requests = BLKDEV_MIN_RQ; | ||
3400 | blk_queue_congestion_threshold(q); | ||
3401 | |||
3402 | if (rl->count[READ] >= queue_congestion_on_threshold(q)) | ||
3403 | set_queue_congested(q, READ); | ||
3404 | else if (rl->count[READ] < queue_congestion_off_threshold(q)) | ||
3405 | clear_queue_congested(q, READ); | ||
3406 | |||
3407 | if (rl->count[WRITE] >= queue_congestion_on_threshold(q)) | ||
3408 | set_queue_congested(q, WRITE); | ||
3409 | else if (rl->count[WRITE] < queue_congestion_off_threshold(q)) | ||
3410 | clear_queue_congested(q, WRITE); | ||
3411 | |||
3412 | if (rl->count[READ] >= q->nr_requests) { | ||
3413 | blk_set_queue_full(q, READ); | ||
3414 | } else if (rl->count[READ]+1 <= q->nr_requests) { | ||
3415 | blk_clear_queue_full(q, READ); | ||
3416 | wake_up(&rl->wait[READ]); | ||
3417 | } | ||
3418 | |||
3419 | if (rl->count[WRITE] >= q->nr_requests) { | ||
3420 | blk_set_queue_full(q, WRITE); | ||
3421 | } else if (rl->count[WRITE]+1 <= q->nr_requests) { | ||
3422 | blk_clear_queue_full(q, WRITE); | ||
3423 | wake_up(&rl->wait[WRITE]); | ||
3424 | } | ||
3425 | return ret; | ||
3426 | } | ||
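/*
 * Illustrative note (not part of this file): this store method backs the
 * per-queue "nr_requests" sysfs file, so queue depth is tuned from user
 * space roughly as follows (device name is hypothetical):
 *
 *	# echo 64 > /sys/block/hda/queue/nr_requests
 *
 * Values below BLKDEV_MIN_RQ are clamped, and the congestion and
 * queue-full state is re-evaluated against the new limit, waking any
 * tasks that were waiting for a free request.
 */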
3427 | |||
3428 | static ssize_t queue_ra_show(struct request_queue *q, char *page) | ||
3429 | { | ||
3430 | int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10); | ||
3431 | |||
3432 | return queue_var_show(ra_kb, (page)); | ||
3433 | } | ||
3434 | |||
3435 | static ssize_t | ||
3436 | queue_ra_store(struct request_queue *q, const char *page, size_t count) | ||
3437 | { | ||
3438 | unsigned long ra_kb; | ||
3439 | ssize_t ret = queue_var_store(&ra_kb, page, count); | ||
3440 | |||
3441 | spin_lock_irq(q->queue_lock); | ||
3442 | if (ra_kb > (q->max_sectors >> 1)) | ||
3443 | ra_kb = (q->max_sectors >> 1); | ||
3444 | |||
3445 | q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); | ||
3446 | spin_unlock_irq(q->queue_lock); | ||
3447 | |||
3448 | return ret; | ||
3449 | } | ||
3450 | |||
3451 | static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) | ||
3452 | { | ||
3453 | int max_sectors_kb = q->max_sectors >> 1; | ||
3454 | |||
3455 | return queue_var_show(max_sectors_kb, (page)); | ||
3456 | } | ||
3457 | |||
3458 | static ssize_t | ||
3459 | queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) | ||
3460 | { | ||
3461 | unsigned long max_sectors_kb, | ||
3462 | max_hw_sectors_kb = q->max_hw_sectors >> 1, | ||
3463 | page_kb = 1 << (PAGE_CACHE_SHIFT - 10); | ||
3464 | ssize_t ret = queue_var_store(&max_sectors_kb, page, count); | ||
3465 | int ra_kb; | ||
3466 | |||
3467 | if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) | ||
3468 | return -EINVAL; | ||
3469 | /* | ||
3470 | * Take the queue lock to update the readahead and max_sectors | ||
3471 | * values synchronously: | ||
3472 | */ | ||
3473 | spin_lock_irq(q->queue_lock); | ||
3474 | /* | ||
3475 | * Trim readahead window as well, if necessary: | ||
3476 | */ | ||
3477 | ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10); | ||
3478 | if (ra_kb > max_sectors_kb) | ||
3479 | q->backing_dev_info.ra_pages = | ||
3480 | max_sectors_kb >> (PAGE_CACHE_SHIFT - 10); | ||
3481 | |||
3482 | q->max_sectors = max_sectors_kb << 1; | ||
3483 | spin_unlock_irq(q->queue_lock); | ||
3484 | |||
3485 | return ret; | ||
3486 | } | ||
3487 | |||
3488 | static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) | ||
3489 | { | ||
3490 | int max_hw_sectors_kb = q->max_hw_sectors >> 1; | ||
3491 | |||
3492 | return queue_var_show(max_hw_sectors_kb, (page)); | ||
3493 | } | ||
3494 | |||
3495 | |||
3496 | static struct queue_sysfs_entry queue_requests_entry = { | ||
3497 | .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, | ||
3498 | .show = queue_requests_show, | ||
3499 | .store = queue_requests_store, | ||
3500 | }; | ||
3501 | |||
3502 | static struct queue_sysfs_entry queue_ra_entry = { | ||
3503 | .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR }, | ||
3504 | .show = queue_ra_show, | ||
3505 | .store = queue_ra_store, | ||
3506 | }; | ||
3507 | |||
3508 | static struct queue_sysfs_entry queue_max_sectors_entry = { | ||
3509 | .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR }, | ||
3510 | .show = queue_max_sectors_show, | ||
3511 | .store = queue_max_sectors_store, | ||
3512 | }; | ||
3513 | |||
3514 | static struct queue_sysfs_entry queue_max_hw_sectors_entry = { | ||
3515 | .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO }, | ||
3516 | .show = queue_max_hw_sectors_show, | ||
3517 | }; | ||
3518 | |||
3519 | static struct queue_sysfs_entry queue_iosched_entry = { | ||
3520 | .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, | ||
3521 | .show = elv_iosched_show, | ||
3522 | .store = elv_iosched_store, | ||
3523 | }; | ||
3524 | |||
3525 | static struct attribute *default_attrs[] = { | ||
3526 | &queue_requests_entry.attr, | ||
3527 | &queue_ra_entry.attr, | ||
3528 | &queue_max_hw_sectors_entry.attr, | ||
3529 | &queue_max_sectors_entry.attr, | ||
3530 | &queue_iosched_entry.attr, | ||
3531 | NULL, | ||
3532 | }; | ||
3533 | |||
3534 | #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr) | ||
3535 | |||
3536 | static ssize_t | ||
3537 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
3538 | { | ||
3539 | struct queue_sysfs_entry *entry = to_queue(attr); | ||
3540 | struct request_queue *q; | ||
3541 | |||
3542 | q = container_of(kobj, struct request_queue, kobj); | ||
3543 | if (!entry->show) | ||
3544 | return -EIO; | ||
3545 | |||
3546 | return entry->show(q, page); | ||
3547 | } | ||
3548 | |||
3549 | static ssize_t | ||
3550 | queue_attr_store(struct kobject *kobj, struct attribute *attr, | ||
3551 | const char *page, size_t length) | ||
3552 | { | ||
3553 | struct queue_sysfs_entry *entry = to_queue(attr); | ||
3554 | struct request_queue *q; | ||
3555 | |||
3556 | q = container_of(kobj, struct request_queue, kobj); | ||
3557 | if (!entry->store) | ||
3558 | return -EIO; | ||
3559 | |||
3560 | return entry->store(q, page, length); | ||
3561 | } | ||
3562 | |||
3563 | static struct sysfs_ops queue_sysfs_ops = { | ||
3564 | .show = queue_attr_show, | ||
3565 | .store = queue_attr_store, | ||
3566 | }; | ||
3567 | |||
3568 | static struct kobj_type queue_ktype = { | ||
3569 | .sysfs_ops = &queue_sysfs_ops, | ||
3570 | .default_attrs = default_attrs, | ||
3571 | }; | ||
3572 | |||
3573 | int blk_register_queue(struct gendisk *disk) | ||
3574 | { | ||
3575 | int ret; | ||
3576 | |||
3577 | request_queue_t *q = disk->queue; | ||
3578 | |||
3579 | if (!q || !q->request_fn) | ||
3580 | return -ENXIO; | ||
3581 | |||
3582 | q->kobj.parent = kobject_get(&disk->kobj); | ||
3583 | if (!q->kobj.parent) | ||
3584 | return -EBUSY; | ||
3585 | |||
3586 | snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); | ||
3587 | q->kobj.ktype = &queue_ktype; | ||
3588 | |||
3589 | ret = kobject_register(&q->kobj); | ||
3590 | if (ret < 0) | ||
3591 | return ret; | ||
3592 | |||
3593 | ret = elv_register_queue(q); | ||
3594 | if (ret) { | ||
3595 | kobject_unregister(&q->kobj); | ||
3596 | return ret; | ||
3597 | } | ||
3598 | |||
3599 | return 0; | ||
3600 | } | ||
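/*
 * Illustrative note (not part of this file): blk_register_queue() parents
 * the "queue" kobject under the gendisk, so with queue_ktype's default
 * attributes the files defined above appear as (disk name hypothetical):
 *
 *	/sys/block/hda/queue/nr_requests
 *	/sys/block/hda/queue/read_ahead_kb
 *	/sys/block/hda/queue/max_sectors_kb
 *	/sys/block/hda/queue/max_hw_sectors_kb
 *	/sys/block/hda/queue/scheduler
 */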
3601 | |||
3602 | void blk_unregister_queue(struct gendisk *disk) | ||
3603 | { | ||
3604 | request_queue_t *q = disk->queue; | ||
3605 | |||
3606 | if (q && q->request_fn) { | ||
3607 | elv_unregister_queue(q); | ||
3608 | |||
3609 | kobject_unregister(&q->kobj); | ||
3610 | kobject_put(&disk->kobj); | ||
3611 | } | ||
3612 | } | ||