-rw-r--r--   block/Makefile       |    5
-rw-r--r--   block/blk-barrier.c  |  319
-rw-r--r--   block/blk-core.c     | 1255
-rw-r--r--   block/blk-exec.c     |  105
-rw-r--r--   block/blk-ioc.c      |  194
-rw-r--r--   block/blk-map.c      |  264
-rw-r--r--   block/blk-settings.c |  402
-rw-r--r--   block/blk.h          |   17
8 files changed, 1312 insertions, 1249 deletions
diff --git a/block/Makefile b/block/Makefile
index fcaae4ae6704..2002046d0a9e 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -2,8 +2,9 @@
2 | # Makefile for the kernel block layer | 2 | # Makefile for the kernel block layer |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o ioctl.o \ | 5 | obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ |
6 | genhd.o scsi_ioctl.o | 6 | blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \ |
7 | blk-exec.o ioctl.o genhd.o scsi_ioctl.o | ||
7 | 8 | ||
8 | obj-$(CONFIG_BLK_DEV_BSG) += bsg.o | 9 | obj-$(CONFIG_BLK_DEV_BSG) += bsg.o |
9 | obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o | 10 | obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o |
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
new file mode 100644
index 000000000000..5f74fec327d5
--- /dev/null
+++ b/block/blk-barrier.c
@@ -0,0 +1,319 @@
1 | /* | ||
2 | * Functions related to barrier IO handling | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/bio.h> | ||
7 | #include <linux/blkdev.h> | ||
8 | |||
9 | #include "blk.h" | ||
10 | |||
11 | /** | ||
12 | * blk_queue_ordered - does this queue support ordered writes | ||
13 | * @q: the request queue | ||
14 | * @ordered: one of QUEUE_ORDERED_* | ||
15 | * @prepare_flush_fn: rq setup helper for cache flush ordered writes | ||
16 | * | ||
17 | * Description: | ||
18 | * For journalled file systems, doing ordered writes on a commit | ||
19 | * block instead of explicitly doing wait_on_buffer (which is bad | ||
20 | * for performance) can be a big win. Block drivers supporting this | ||
21 | * feature should call this function and indicate so. | ||
22 | * | ||
23 | **/ | ||
24 | int blk_queue_ordered(struct request_queue *q, unsigned ordered, | ||
25 | prepare_flush_fn *prepare_flush_fn) | ||
26 | { | ||
27 | if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && | ||
28 | prepare_flush_fn == NULL) { | ||
29 | printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n"); | ||
30 | return -EINVAL; | ||
31 | } | ||
32 | |||
33 | if (ordered != QUEUE_ORDERED_NONE && | ||
34 | ordered != QUEUE_ORDERED_DRAIN && | ||
35 | ordered != QUEUE_ORDERED_DRAIN_FLUSH && | ||
36 | ordered != QUEUE_ORDERED_DRAIN_FUA && | ||
37 | ordered != QUEUE_ORDERED_TAG && | ||
38 | ordered != QUEUE_ORDERED_TAG_FLUSH && | ||
39 | ordered != QUEUE_ORDERED_TAG_FUA) { | ||
40 | printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered); | ||
41 | return -EINVAL; | ||
42 | } | ||
43 | |||
44 | q->ordered = ordered; | ||
45 | q->next_ordered = ordered; | ||
46 | q->prepare_flush_fn = prepare_flush_fn; | ||
47 | |||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | EXPORT_SYMBOL(blk_queue_ordered); | ||
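For context, a driver with a volatile write cache would normally call this at queue setup time and pick one of the flush-capable modes. A minimal sketch, assuming a hypothetical my_disk driver whose cache flush is issued as a packet command; the opcode, timeout and the choice of QUEUE_ORDERED_DRAIN_FLUSH are illustrative, not taken from this patch:

        #include <linux/blkdev.h>

        #define MY_DISK_FLUSH_OPCODE    0x35    /* hypothetical; happens to match SCSI SYNCHRONIZE CACHE */

        /* Illustrative prepare_flush_fn: turn the pre-initialised rq into a
         * cache-flush command for the hardware. */
        static void my_disk_prepare_flush(struct request_queue *q, struct request *rq)
        {
                rq->cmd_type = REQ_TYPE_BLOCK_PC;
                rq->timeout = 60 * HZ;
                rq->cmd[0] = MY_DISK_FLUSH_OPCODE;
                rq->cmd_len = 10;
        }

        static int my_disk_setup_ordered(struct request_queue *q)
        {
                /* Drain outstanding requests and issue an explicit cache flush
                 * before and after the barrier write itself. */
                return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
                                         my_disk_prepare_flush);
        }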
52 | |||
53 | /* | ||
54 | * Cache flushing for ordered writes handling | ||
55 | */ | ||
56 | inline unsigned blk_ordered_cur_seq(struct request_queue *q) | ||
57 | { | ||
58 | if (!q->ordseq) | ||
59 | return 0; | ||
60 | return 1 << ffz(q->ordseq); | ||
61 | } | ||
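The ordseq field is a bitmask of completed stages, and blk_ordered_cur_seq() reports the lowest stage that has not finished yet via ffz() (find first zero bit). A worked illustration, assuming the QUEUE_ORDSEQ_* flags are consecutive single bits from STARTED (0x01) up to DONE (0x20), which is how the surrounding code treats them:

        /* Not part of the patch: shows the arithmetic only. */
        unsigned ordseq = QUEUE_ORDSEQ_STARTED | QUEUE_ORDSEQ_DRAIN |
                          QUEUE_ORDSEQ_PREFLUSH;        /* 0x07: three stages complete */
        unsigned cur = 1 << ffz(ordseq);                /* ffz(0x07) == 3, so cur == 0x08 */
        /* cur == QUEUE_ORDSEQ_BAR: the proxy barrier write is the stage in flight.
         * Once every stage bit is set, 1 << ffz() yields QUEUE_ORDSEQ_DONE. */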
62 | |||
63 | unsigned blk_ordered_req_seq(struct request *rq) | ||
64 | { | ||
65 | struct request_queue *q = rq->q; | ||
66 | |||
67 | BUG_ON(q->ordseq == 0); | ||
68 | |||
69 | if (rq == &q->pre_flush_rq) | ||
70 | return QUEUE_ORDSEQ_PREFLUSH; | ||
71 | if (rq == &q->bar_rq) | ||
72 | return QUEUE_ORDSEQ_BAR; | ||
73 | if (rq == &q->post_flush_rq) | ||
74 | return QUEUE_ORDSEQ_POSTFLUSH; | ||
75 | |||
76 | /* | ||
77 | * !fs requests don't need to follow barrier ordering. Always | ||
78 | * put them at the front. This fixes the following deadlock. | ||
79 | * | ||
80 | * http://thread.gmane.org/gmane.linux.kernel/537473 | ||
81 | */ | ||
82 | if (!blk_fs_request(rq)) | ||
83 | return QUEUE_ORDSEQ_DRAIN; | ||
84 | |||
85 | if ((rq->cmd_flags & REQ_ORDERED_COLOR) == | ||
86 | (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR)) | ||
87 | return QUEUE_ORDSEQ_DRAIN; | ||
88 | else | ||
89 | return QUEUE_ORDSEQ_DONE; | ||
90 | } | ||
91 | |||
92 | void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) | ||
93 | { | ||
94 | struct request *rq; | ||
95 | |||
96 | if (error && !q->orderr) | ||
97 | q->orderr = error; | ||
98 | |||
99 | BUG_ON(q->ordseq & seq); | ||
100 | q->ordseq |= seq; | ||
101 | |||
102 | if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) | ||
103 | return; | ||
104 | |||
105 | /* | ||
106 | * Okay, sequence complete. | ||
107 | */ | ||
108 | q->ordseq = 0; | ||
109 | rq = q->orig_bar_rq; | ||
110 | |||
111 | if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq))) | ||
112 | BUG(); | ||
113 | } | ||
114 | |||
115 | static void pre_flush_end_io(struct request *rq, int error) | ||
116 | { | ||
117 | elv_completed_request(rq->q, rq); | ||
118 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error); | ||
119 | } | ||
120 | |||
121 | static void bar_end_io(struct request *rq, int error) | ||
122 | { | ||
123 | elv_completed_request(rq->q, rq); | ||
124 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error); | ||
125 | } | ||
126 | |||
127 | static void post_flush_end_io(struct request *rq, int error) | ||
128 | { | ||
129 | elv_completed_request(rq->q, rq); | ||
130 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); | ||
131 | } | ||
132 | |||
133 | static void queue_flush(struct request_queue *q, unsigned which) | ||
134 | { | ||
135 | struct request *rq; | ||
136 | rq_end_io_fn *end_io; | ||
137 | |||
138 | if (which == QUEUE_ORDERED_PREFLUSH) { | ||
139 | rq = &q->pre_flush_rq; | ||
140 | end_io = pre_flush_end_io; | ||
141 | } else { | ||
142 | rq = &q->post_flush_rq; | ||
143 | end_io = post_flush_end_io; | ||
144 | } | ||
145 | |||
146 | rq->cmd_flags = REQ_HARDBARRIER; | ||
147 | rq_init(q, rq); | ||
148 | rq->elevator_private = NULL; | ||
149 | rq->elevator_private2 = NULL; | ||
150 | rq->rq_disk = q->bar_rq.rq_disk; | ||
151 | rq->end_io = end_io; | ||
152 | q->prepare_flush_fn(q, rq); | ||
153 | |||
154 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | ||
155 | } | ||
156 | |||
157 | static inline struct request *start_ordered(struct request_queue *q, | ||
158 | struct request *rq) | ||
159 | { | ||
160 | q->orderr = 0; | ||
161 | q->ordered = q->next_ordered; | ||
162 | q->ordseq |= QUEUE_ORDSEQ_STARTED; | ||
163 | |||
164 | /* | ||
165 | * Prep proxy barrier request. | ||
166 | */ | ||
167 | blkdev_dequeue_request(rq); | ||
168 | q->orig_bar_rq = rq; | ||
169 | rq = &q->bar_rq; | ||
170 | rq->cmd_flags = 0; | ||
171 | rq_init(q, rq); | ||
172 | if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) | ||
173 | rq->cmd_flags |= REQ_RW; | ||
174 | if (q->ordered & QUEUE_ORDERED_FUA) | ||
175 | rq->cmd_flags |= REQ_FUA; | ||
176 | rq->elevator_private = NULL; | ||
177 | rq->elevator_private2 = NULL; | ||
178 | init_request_from_bio(rq, q->orig_bar_rq->bio); | ||
179 | rq->end_io = bar_end_io; | ||
180 | |||
181 | /* | ||
182 | * Queue ordered sequence. As we stack them at the head, we | ||
183 | * need to queue in reverse order. Note that we rely on the fact | ||
184 | * that no fs request uses ELEVATOR_INSERT_FRONT and thus no fs | ||
185 | * request gets in between the ordered sequence. If this request is | ||
186 | * an empty barrier, we don't need to do a postflush ever, since | ||
187 | * there will be no data written between the pre and post flush. | ||
188 | * Hence a single flush will suffice. | ||
189 | */ | ||
190 | if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq)) | ||
191 | queue_flush(q, QUEUE_ORDERED_POSTFLUSH); | ||
192 | else | ||
193 | q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH; | ||
194 | |||
195 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | ||
196 | |||
197 | if (q->ordered & QUEUE_ORDERED_PREFLUSH) { | ||
198 | queue_flush(q, QUEUE_ORDERED_PREFLUSH); | ||
199 | rq = &q->pre_flush_rq; | ||
200 | } else | ||
201 | q->ordseq |= QUEUE_ORDSEQ_PREFLUSH; | ||
202 | |||
203 | if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0) | ||
204 | q->ordseq |= QUEUE_ORDSEQ_DRAIN; | ||
205 | else | ||
206 | rq = NULL; | ||
207 | |||
208 | return rq; | ||
209 | } | ||
210 | |||
211 | int blk_do_ordered(struct request_queue *q, struct request **rqp) | ||
212 | { | ||
213 | struct request *rq = *rqp; | ||
214 | const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); | ||
215 | |||
216 | if (!q->ordseq) { | ||
217 | if (!is_barrier) | ||
218 | return 1; | ||
219 | |||
220 | if (q->next_ordered != QUEUE_ORDERED_NONE) { | ||
221 | *rqp = start_ordered(q, rq); | ||
222 | return 1; | ||
223 | } else { | ||
224 | /* | ||
225 | * This can happen when the queue switches to | ||
226 | * ORDERED_NONE while this request is on it. | ||
227 | */ | ||
228 | blkdev_dequeue_request(rq); | ||
229 | if (__blk_end_request(rq, -EOPNOTSUPP, | ||
230 | blk_rq_bytes(rq))) | ||
231 | BUG(); | ||
232 | *rqp = NULL; | ||
233 | return 0; | ||
234 | } | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Ordered sequence in progress | ||
239 | */ | ||
240 | |||
241 | /* Special requests are not subject to ordering rules. */ | ||
242 | if (!blk_fs_request(rq) && | ||
243 | rq != &q->pre_flush_rq && rq != &q->post_flush_rq) | ||
244 | return 1; | ||
245 | |||
246 | if (q->ordered & QUEUE_ORDERED_TAG) { | ||
247 | /* Ordered by tag. Blocking the next barrier is enough. */ | ||
248 | if (is_barrier && rq != &q->bar_rq) | ||
249 | *rqp = NULL; | ||
250 | } else { | ||
251 | /* Ordered by draining. Wait for turn. */ | ||
252 | WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q)); | ||
253 | if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q)) | ||
254 | *rqp = NULL; | ||
255 | } | ||
256 | |||
257 | return 1; | ||
258 | } | ||
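blk_do_ordered() is meant to be consulted by the dispatch path each time a request is about to be handed to the driver. Roughly, the caller loops as below; this is a hedged paraphrase of the elevator's dispatch loop, not code added by this patch:

        /* Keep looking at the head of the dispatch list, but let
         * blk_do_ordered() veto, substitute or complete the request. */
        static struct request *my_next_request(struct request_queue *q)
        {
                struct request *rq;

                while (!list_empty(&q->queue_head)) {
                        rq = list_entry(q->queue_head.next, struct request, queuelist);
                        if (blk_do_ordered(q, &rq)) {
                                if (rq)         /* possibly a proxy flush/barrier rq */
                                        return rq;
                                break;          /* NULL: not our turn yet, stop dispatching */
                        }
                        /* returned 0: rq was ended with -EOPNOTSUPP, try the next one */
                }
                return NULL;
        }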
259 | |||
260 | static void bio_end_empty_barrier(struct bio *bio, int err) | ||
261 | { | ||
262 | if (err) | ||
263 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | ||
264 | |||
265 | complete(bio->bi_private); | ||
266 | } | ||
267 | |||
268 | /** | ||
269 | * blkdev_issue_flush - queue a flush | ||
270 | * @bdev: blockdev to issue flush for | ||
271 | * @error_sector: error sector | ||
272 | * | ||
273 | * Description: | ||
274 | * Issue a flush for the block device in question. Caller can supply | ||
275 | * room for storing the error offset in case of a flush error, if they | ||
276 | * wish to. The flush is submitted and waited for before this returns. | ||
277 | */ | ||
278 | int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) | ||
279 | { | ||
280 | DECLARE_COMPLETION_ONSTACK(wait); | ||
281 | struct request_queue *q; | ||
282 | struct bio *bio; | ||
283 | int ret; | ||
284 | |||
285 | if (bdev->bd_disk == NULL) | ||
286 | return -ENXIO; | ||
287 | |||
288 | q = bdev_get_queue(bdev); | ||
289 | if (!q) | ||
290 | return -ENXIO; | ||
291 | |||
292 | bio = bio_alloc(GFP_KERNEL, 0); | ||
293 | if (!bio) | ||
294 | return -ENOMEM; | ||
295 | |||
296 | bio->bi_end_io = bio_end_empty_barrier; | ||
297 | bio->bi_private = &wait; | ||
298 | bio->bi_bdev = bdev; | ||
299 | submit_bio(1 << BIO_RW_BARRIER, bio); | ||
300 | |||
301 | wait_for_completion(&wait); | ||
302 | |||
303 | /* | ||
304 | * The driver must store the error location in ->bi_sector, if | ||
305 | * it supports it. For non-stacked drivers, this should be copied | ||
306 | * from rq->sector. | ||
307 | */ | ||
308 | if (error_sector) | ||
309 | *error_sector = bio->bi_sector; | ||
310 | |||
311 | ret = 0; | ||
312 | if (!bio_flagged(bio, BIO_UPTODATE)) | ||
313 | ret = -EIO; | ||
314 | |||
315 | bio_put(bio); | ||
316 | return ret; | ||
317 | } | ||
318 | |||
319 | EXPORT_SYMBOL(blkdev_issue_flush); | ||
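A typical caller is a filesystem or ioctl path that wants the device's write-back cache emptied after committing data; a minimal hedged usage sketch, with my_fs_barrier_flush invented for illustration:

        #include <linux/blkdev.h>

        /* Flush bdev's write cache. Passing NULL means we don't care exactly
         * which sector a failed flush stopped at; callers that do care can
         * hand in a sector_t to receive the error offset. */
        static int my_fs_barrier_flush(struct block_device *bdev)
        {
                return blkdev_issue_flush(bdev, NULL);
        }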
diff --git a/block/blk-core.c b/block/blk-core.c
index 937f9d0b9bd5..2c73ed1a8131 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -20,7 +20,6 @@
20 | #include <linux/kernel_stat.h> | 20 | #include <linux/kernel_stat.h> |
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | ||
24 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
25 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
26 | #include <linux/swap.h> | 25 | #include <linux/swap.h> |
@@ -34,20 +33,9 @@
34 | 33 | ||
35 | #include "blk.h" | 34 | #include "blk.h" |
36 | 35 | ||
37 | /* | ||
38 | * for max sense size | ||
39 | */ | ||
40 | #include <scsi/scsi_cmnd.h> | ||
41 | |||
42 | static void blk_unplug_work(struct work_struct *work); | ||
43 | static void blk_unplug_timeout(unsigned long data); | ||
44 | static void drive_stat_acct(struct request *rq, int new_io); | 36 | static void drive_stat_acct(struct request *rq, int new_io); |
45 | static void init_request_from_bio(struct request *req, struct bio *bio); | ||
46 | static int __make_request(struct request_queue *q, struct bio *bio); | 37 | static int __make_request(struct request_queue *q, struct bio *bio); |
47 | static struct io_context *current_io_context(gfp_t gfp_flags, int node); | ||
48 | static void blk_recalc_rq_segments(struct request *rq); | 38 | static void blk_recalc_rq_segments(struct request *rq); |
49 | static void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | ||
50 | struct bio *bio); | ||
51 | 39 | ||
52 | /* | 40 | /* |
53 | * For the allocated request tables | 41 | * For the allocated request tables |
@@ -60,28 +48,12 @@ struct kmem_cache *request_cachep;
60 | struct kmem_cache *blk_requestq_cachep = NULL; | 48 | struct kmem_cache *blk_requestq_cachep = NULL; |
61 | 49 | ||
62 | /* | 50 | /* |
63 | * For io context allocations | ||
64 | */ | ||
65 | static struct kmem_cache *iocontext_cachep; | ||
66 | |||
67 | /* | ||
68 | * Controlling structure to kblockd | 51 | * Controlling structure to kblockd |
69 | */ | 52 | */ |
70 | static struct workqueue_struct *kblockd_workqueue; | 53 | static struct workqueue_struct *kblockd_workqueue; |
71 | 54 | ||
72 | unsigned long blk_max_low_pfn, blk_max_pfn; | ||
73 | |||
74 | EXPORT_SYMBOL(blk_max_low_pfn); | ||
75 | EXPORT_SYMBOL(blk_max_pfn); | ||
76 | |||
77 | static DEFINE_PER_CPU(struct list_head, blk_cpu_done); | 55 | static DEFINE_PER_CPU(struct list_head, blk_cpu_done); |
78 | 56 | ||
79 | /* Amount of time in which a process may batch requests */ | ||
80 | #define BLK_BATCH_TIME (HZ/50UL) | ||
81 | |||
82 | /* Number of requests a "batching" process may submit */ | ||
83 | #define BLK_BATCH_REQ 32 | ||
84 | |||
85 | void blk_queue_congestion_threshold(struct request_queue *q) | 57 | void blk_queue_congestion_threshold(struct request_queue *q) |
86 | { | 58 | { |
87 | int nr; | 59 | int nr; |
@@ -117,113 +89,7 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
117 | } | 89 | } |
118 | EXPORT_SYMBOL(blk_get_backing_dev_info); | 90 | EXPORT_SYMBOL(blk_get_backing_dev_info); |
119 | 91 | ||
120 | /** | 92 | void rq_init(struct request_queue *q, struct request *rq) |
121 | * blk_queue_prep_rq - set a prepare_request function for queue | ||
122 | * @q: queue | ||
123 | * @pfn: prepare_request function | ||
124 | * | ||
125 | * It's possible for a queue to register a prepare_request callback which | ||
126 | * is invoked before the request is handed to the request_fn. The goal of | ||
127 | * the function is to prepare a request for I/O, it can be used to build a | ||
128 | * cdb from the request data for instance. | ||
129 | * | ||
130 | */ | ||
131 | void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) | ||
132 | { | ||
133 | q->prep_rq_fn = pfn; | ||
134 | } | ||
135 | |||
136 | EXPORT_SYMBOL(blk_queue_prep_rq); | ||
137 | |||
138 | /** | ||
139 | * blk_queue_merge_bvec - set a merge_bvec function for queue | ||
140 | * @q: queue | ||
141 | * @mbfn: merge_bvec_fn | ||
142 | * | ||
143 | * Usually queues have static limitations on the max sectors or segments that | ||
144 | * we can put in a request. Stacking drivers may have some settings that | ||
145 | * are dynamic, and thus we have to query the queue whether it is ok to | ||
146 | * add a new bio_vec to a bio at a given offset or not. If the block device | ||
147 | * has such limitations, it needs to register a merge_bvec_fn to control | ||
148 | * the size of bios sent to it. Note that a block device *must* allow a | ||
149 | * single page to be added to an empty bio. The block device driver may want | ||
150 | * to use the bio_split() function to deal with these bios. By default | ||
151 | * no merge_bvec_fn is defined for a queue, and only the fixed limits are | ||
152 | * honored. | ||
153 | */ | ||
154 | void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) | ||
155 | { | ||
156 | q->merge_bvec_fn = mbfn; | ||
157 | } | ||
158 | |||
159 | EXPORT_SYMBOL(blk_queue_merge_bvec); | ||
160 | |||
161 | void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) | ||
162 | { | ||
163 | q->softirq_done_fn = fn; | ||
164 | } | ||
165 | |||
166 | EXPORT_SYMBOL(blk_queue_softirq_done); | ||
167 | |||
168 | /** | ||
169 | * blk_queue_make_request - define an alternate make_request function for a device | ||
170 | * @q: the request queue for the device to be affected | ||
171 | * @mfn: the alternate make_request function | ||
172 | * | ||
173 | * Description: | ||
174 | * The normal way for &struct bios to be passed to a device | ||
175 | * driver is for them to be collected into requests on a request | ||
176 | * queue, and then to allow the device driver to select requests | ||
177 | * off that queue when it is ready. This works well for many block | ||
178 | * devices. However some block devices (typically virtual devices | ||
179 | * such as md or lvm) do not benefit from the processing on the | ||
180 | * request queue, and are served best by having the requests passed | ||
181 | * directly to them. This can be achieved by providing a function | ||
182 | * to blk_queue_make_request(). | ||
183 | * | ||
184 | * Caveat: | ||
185 | * The driver that does this *must* be able to deal appropriately | ||
186 | * with buffers in "highmemory". This can be accomplished by either calling | ||
187 | * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling | ||
188 | * blk_queue_bounce() to create a buffer in normal memory. | ||
189 | **/ | ||
190 | void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) | ||
191 | { | ||
192 | /* | ||
193 | * set defaults | ||
194 | */ | ||
195 | q->nr_requests = BLKDEV_MAX_RQ; | ||
196 | blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); | ||
197 | blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); | ||
198 | q->make_request_fn = mfn; | ||
199 | q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | ||
200 | q->backing_dev_info.state = 0; | ||
201 | q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; | ||
202 | blk_queue_max_sectors(q, SAFE_MAX_SECTORS); | ||
203 | blk_queue_hardsect_size(q, 512); | ||
204 | blk_queue_dma_alignment(q, 511); | ||
205 | blk_queue_congestion_threshold(q); | ||
206 | q->nr_batching = BLK_BATCH_REQ; | ||
207 | |||
208 | q->unplug_thresh = 4; /* hmm */ | ||
209 | q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */ | ||
210 | if (q->unplug_delay == 0) | ||
211 | q->unplug_delay = 1; | ||
212 | |||
213 | INIT_WORK(&q->unplug_work, blk_unplug_work); | ||
214 | |||
215 | q->unplug_timer.function = blk_unplug_timeout; | ||
216 | q->unplug_timer.data = (unsigned long)q; | ||
217 | |||
218 | /* | ||
219 | * by default assume old behaviour and bounce for any highmem page | ||
220 | */ | ||
221 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | ||
222 | } | ||
223 | |||
224 | EXPORT_SYMBOL(blk_queue_make_request); | ||
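The make_request path described above is what bio-based drivers (md, dm, loop and the like) use. A hedged sketch of the registration, with the my_vdev names invented for illustration:

        #include <linux/blkdev.h>
        #include <linux/bio.h>

        static int my_vdev_make_request(struct request_queue *q, struct bio *bio)
        {
                /* A real driver would transfer the bio's data here; this stub
                 * simply completes it successfully. */
                bio_endio(bio, 0);
                return 0;
        }

        static struct request_queue *my_vdev_create_queue(void)
        {
                struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

                if (q)
                        blk_queue_make_request(q, my_vdev_make_request);
                return q;
        }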
225 | |||
226 | static void rq_init(struct request_queue *q, struct request *rq) | ||
227 | { | 93 | { |
228 | INIT_LIST_HEAD(&rq->queuelist); | 94 | INIT_LIST_HEAD(&rq->queuelist); |
229 | INIT_LIST_HEAD(&rq->donelist); | 95 | INIT_LIST_HEAD(&rq->donelist); |
@@ -247,255 +113,6 @@ static void rq_init(struct request_queue *q, struct request *rq)
247 | rq->next_rq = NULL; | 113 | rq->next_rq = NULL; |
248 | } | 114 | } |
249 | 115 | ||
250 | /** | ||
251 | * blk_queue_ordered - does this queue support ordered writes | ||
252 | * @q: the request queue | ||
253 | * @ordered: one of QUEUE_ORDERED_* | ||
254 | * @prepare_flush_fn: rq setup helper for cache flush ordered writes | ||
255 | * | ||
256 | * Description: | ||
257 | * For journalled file systems, doing ordered writes on a commit | ||
258 | * block instead of explicitly doing wait_on_buffer (which is bad | ||
259 | * for performance) can be a big win. Block drivers supporting this | ||
260 | * feature should call this function and indicate so. | ||
261 | * | ||
262 | **/ | ||
263 | int blk_queue_ordered(struct request_queue *q, unsigned ordered, | ||
264 | prepare_flush_fn *prepare_flush_fn) | ||
265 | { | ||
266 | if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && | ||
267 | prepare_flush_fn == NULL) { | ||
268 | printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n"); | ||
269 | return -EINVAL; | ||
270 | } | ||
271 | |||
272 | if (ordered != QUEUE_ORDERED_NONE && | ||
273 | ordered != QUEUE_ORDERED_DRAIN && | ||
274 | ordered != QUEUE_ORDERED_DRAIN_FLUSH && | ||
275 | ordered != QUEUE_ORDERED_DRAIN_FUA && | ||
276 | ordered != QUEUE_ORDERED_TAG && | ||
277 | ordered != QUEUE_ORDERED_TAG_FLUSH && | ||
278 | ordered != QUEUE_ORDERED_TAG_FUA) { | ||
279 | printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered); | ||
280 | return -EINVAL; | ||
281 | } | ||
282 | |||
283 | q->ordered = ordered; | ||
284 | q->next_ordered = ordered; | ||
285 | q->prepare_flush_fn = prepare_flush_fn; | ||
286 | |||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | EXPORT_SYMBOL(blk_queue_ordered); | ||
291 | |||
292 | /* | ||
293 | * Cache flushing for ordered writes handling | ||
294 | */ | ||
295 | inline unsigned blk_ordered_cur_seq(struct request_queue *q) | ||
296 | { | ||
297 | if (!q->ordseq) | ||
298 | return 0; | ||
299 | return 1 << ffz(q->ordseq); | ||
300 | } | ||
301 | |||
302 | unsigned blk_ordered_req_seq(struct request *rq) | ||
303 | { | ||
304 | struct request_queue *q = rq->q; | ||
305 | |||
306 | BUG_ON(q->ordseq == 0); | ||
307 | |||
308 | if (rq == &q->pre_flush_rq) | ||
309 | return QUEUE_ORDSEQ_PREFLUSH; | ||
310 | if (rq == &q->bar_rq) | ||
311 | return QUEUE_ORDSEQ_BAR; | ||
312 | if (rq == &q->post_flush_rq) | ||
313 | return QUEUE_ORDSEQ_POSTFLUSH; | ||
314 | |||
315 | /* | ||
316 | * !fs requests don't need to follow barrier ordering. Always | ||
317 | * put them at the front. This fixes the following deadlock. | ||
318 | * | ||
319 | * http://thread.gmane.org/gmane.linux.kernel/537473 | ||
320 | */ | ||
321 | if (!blk_fs_request(rq)) | ||
322 | return QUEUE_ORDSEQ_DRAIN; | ||
323 | |||
324 | if ((rq->cmd_flags & REQ_ORDERED_COLOR) == | ||
325 | (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR)) | ||
326 | return QUEUE_ORDSEQ_DRAIN; | ||
327 | else | ||
328 | return QUEUE_ORDSEQ_DONE; | ||
329 | } | ||
330 | |||
331 | void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) | ||
332 | { | ||
333 | struct request *rq; | ||
334 | |||
335 | if (error && !q->orderr) | ||
336 | q->orderr = error; | ||
337 | |||
338 | BUG_ON(q->ordseq & seq); | ||
339 | q->ordseq |= seq; | ||
340 | |||
341 | if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) | ||
342 | return; | ||
343 | |||
344 | /* | ||
345 | * Okay, sequence complete. | ||
346 | */ | ||
347 | q->ordseq = 0; | ||
348 | rq = q->orig_bar_rq; | ||
349 | |||
350 | if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq))) | ||
351 | BUG(); | ||
352 | } | ||
353 | |||
354 | static void pre_flush_end_io(struct request *rq, int error) | ||
355 | { | ||
356 | elv_completed_request(rq->q, rq); | ||
357 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error); | ||
358 | } | ||
359 | |||
360 | static void bar_end_io(struct request *rq, int error) | ||
361 | { | ||
362 | elv_completed_request(rq->q, rq); | ||
363 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error); | ||
364 | } | ||
365 | |||
366 | static void post_flush_end_io(struct request *rq, int error) | ||
367 | { | ||
368 | elv_completed_request(rq->q, rq); | ||
369 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); | ||
370 | } | ||
371 | |||
372 | static void queue_flush(struct request_queue *q, unsigned which) | ||
373 | { | ||
374 | struct request *rq; | ||
375 | rq_end_io_fn *end_io; | ||
376 | |||
377 | if (which == QUEUE_ORDERED_PREFLUSH) { | ||
378 | rq = &q->pre_flush_rq; | ||
379 | end_io = pre_flush_end_io; | ||
380 | } else { | ||
381 | rq = &q->post_flush_rq; | ||
382 | end_io = post_flush_end_io; | ||
383 | } | ||
384 | |||
385 | rq->cmd_flags = REQ_HARDBARRIER; | ||
386 | rq_init(q, rq); | ||
387 | rq->elevator_private = NULL; | ||
388 | rq->elevator_private2 = NULL; | ||
389 | rq->rq_disk = q->bar_rq.rq_disk; | ||
390 | rq->end_io = end_io; | ||
391 | q->prepare_flush_fn(q, rq); | ||
392 | |||
393 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | ||
394 | } | ||
395 | |||
396 | static inline struct request *start_ordered(struct request_queue *q, | ||
397 | struct request *rq) | ||
398 | { | ||
399 | q->orderr = 0; | ||
400 | q->ordered = q->next_ordered; | ||
401 | q->ordseq |= QUEUE_ORDSEQ_STARTED; | ||
402 | |||
403 | /* | ||
404 | * Prep proxy barrier request. | ||
405 | */ | ||
406 | blkdev_dequeue_request(rq); | ||
407 | q->orig_bar_rq = rq; | ||
408 | rq = &q->bar_rq; | ||
409 | rq->cmd_flags = 0; | ||
410 | rq_init(q, rq); | ||
411 | if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) | ||
412 | rq->cmd_flags |= REQ_RW; | ||
413 | if (q->ordered & QUEUE_ORDERED_FUA) | ||
414 | rq->cmd_flags |= REQ_FUA; | ||
415 | rq->elevator_private = NULL; | ||
416 | rq->elevator_private2 = NULL; | ||
417 | init_request_from_bio(rq, q->orig_bar_rq->bio); | ||
418 | rq->end_io = bar_end_io; | ||
419 | |||
420 | /* | ||
421 | * Queue ordered sequence. As we stack them at the head, we | ||
422 | * need to queue in reverse order. Note that we rely on the fact | ||
423 | * that no fs request uses ELEVATOR_INSERT_FRONT and thus no fs | ||
424 | * request gets in between the ordered sequence. If this request is | ||
425 | * an empty barrier, we don't need to do a postflush ever, since | ||
426 | * there will be no data written between the pre and post flush. | ||
427 | * Hence a single flush will suffice. | ||
428 | */ | ||
429 | if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq)) | ||
430 | queue_flush(q, QUEUE_ORDERED_POSTFLUSH); | ||
431 | else | ||
432 | q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH; | ||
433 | |||
434 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | ||
435 | |||
436 | if (q->ordered & QUEUE_ORDERED_PREFLUSH) { | ||
437 | queue_flush(q, QUEUE_ORDERED_PREFLUSH); | ||
438 | rq = &q->pre_flush_rq; | ||
439 | } else | ||
440 | q->ordseq |= QUEUE_ORDSEQ_PREFLUSH; | ||
441 | |||
442 | if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0) | ||
443 | q->ordseq |= QUEUE_ORDSEQ_DRAIN; | ||
444 | else | ||
445 | rq = NULL; | ||
446 | |||
447 | return rq; | ||
448 | } | ||
449 | |||
450 | int blk_do_ordered(struct request_queue *q, struct request **rqp) | ||
451 | { | ||
452 | struct request *rq = *rqp; | ||
453 | const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); | ||
454 | |||
455 | if (!q->ordseq) { | ||
456 | if (!is_barrier) | ||
457 | return 1; | ||
458 | |||
459 | if (q->next_ordered != QUEUE_ORDERED_NONE) { | ||
460 | *rqp = start_ordered(q, rq); | ||
461 | return 1; | ||
462 | } else { | ||
463 | /* | ||
464 | * This can happen when the queue switches to | ||
465 | * ORDERED_NONE while this request is on it. | ||
466 | */ | ||
467 | blkdev_dequeue_request(rq); | ||
468 | if (__blk_end_request(rq, -EOPNOTSUPP, | ||
469 | blk_rq_bytes(rq))) | ||
470 | BUG(); | ||
471 | *rqp = NULL; | ||
472 | return 0; | ||
473 | } | ||
474 | } | ||
475 | |||
476 | /* | ||
477 | * Ordered sequence in progress | ||
478 | */ | ||
479 | |||
480 | /* Special requests are not subject to ordering rules. */ | ||
481 | if (!blk_fs_request(rq) && | ||
482 | rq != &q->pre_flush_rq && rq != &q->post_flush_rq) | ||
483 | return 1; | ||
484 | |||
485 | if (q->ordered & QUEUE_ORDERED_TAG) { | ||
486 | /* Ordered by tag. Blocking the next barrier is enough. */ | ||
487 | if (is_barrier && rq != &q->bar_rq) | ||
488 | *rqp = NULL; | ||
489 | } else { | ||
490 | /* Ordered by draining. Wait for turn. */ | ||
491 | WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q)); | ||
492 | if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q)) | ||
493 | *rqp = NULL; | ||
494 | } | ||
495 | |||
496 | return 1; | ||
497 | } | ||
498 | |||
499 | static void req_bio_endio(struct request *rq, struct bio *bio, | 116 | static void req_bio_endio(struct request *rq, struct bio *bio, |
500 | unsigned int nbytes, int error) | 117 | unsigned int nbytes, int error) |
501 | { | 118 | { |
@@ -528,279 +145,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
528 | } | 145 | } |
529 | } | 146 | } |
530 | 147 | ||
531 | /** | ||
532 | * blk_queue_bounce_limit - set bounce buffer limit for queue | ||
533 | * @q: the request queue for the device | ||
534 | * @dma_addr: bus address limit | ||
535 | * | ||
536 | * Description: | ||
537 | * Different hardware can have different requirements as to what pages | ||
538 | * it can do I/O directly to. A low level driver can call | ||
539 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce | ||
540 | * buffers for doing I/O to pages residing above @page. | ||
541 | **/ | ||
542 | void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) | ||
543 | { | ||
544 | unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; | ||
545 | int dma = 0; | ||
546 | |||
547 | q->bounce_gfp = GFP_NOIO; | ||
548 | #if BITS_PER_LONG == 64 | ||
549 | /* Assume anything <= 4GB can be handled by IOMMU. | ||
550 | Actually some IOMMUs can handle everything, but I don't | ||
551 | know of a way to test this here. */ | ||
552 | if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) | ||
553 | dma = 1; | ||
554 | q->bounce_pfn = max_low_pfn; | ||
555 | #else | ||
556 | if (bounce_pfn < blk_max_low_pfn) | ||
557 | dma = 1; | ||
558 | q->bounce_pfn = bounce_pfn; | ||
559 | #endif | ||
560 | if (dma) { | ||
561 | init_emergency_isa_pool(); | ||
562 | q->bounce_gfp = GFP_NOIO | GFP_DMA; | ||
563 | q->bounce_pfn = bounce_pfn; | ||
564 | } | ||
565 | } | ||
566 | |||
567 | EXPORT_SYMBOL(blk_queue_bounce_limit); | ||
568 | |||
569 | /** | ||
570 | * blk_queue_max_sectors - set max sectors for a request for this queue | ||
571 | * @q: the request queue for the device | ||
572 | * @max_sectors: max sectors in the usual 512b unit | ||
573 | * | ||
574 | * Description: | ||
575 | * Enables a low level driver to set an upper limit on the size of | ||
576 | * received requests. | ||
577 | **/ | ||
578 | void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) | ||
579 | { | ||
580 | if ((max_sectors << 9) < PAGE_CACHE_SIZE) { | ||
581 | max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | ||
582 | printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors); | ||
583 | } | ||
584 | |||
585 | if (BLK_DEF_MAX_SECTORS > max_sectors) | ||
586 | q->max_hw_sectors = q->max_sectors = max_sectors; | ||
587 | else { | ||
588 | q->max_sectors = BLK_DEF_MAX_SECTORS; | ||
589 | q->max_hw_sectors = max_sectors; | ||
590 | } | ||
591 | } | ||
592 | |||
593 | EXPORT_SYMBOL(blk_queue_max_sectors); | ||
594 | |||
595 | /** | ||
596 | * blk_queue_max_phys_segments - set max phys segments for a request for this queue | ||
597 | * @q: the request queue for the device | ||
598 | * @max_segments: max number of segments | ||
599 | * | ||
600 | * Description: | ||
601 | * Enables a low level driver to set an upper limit on the number of | ||
602 | * physical data segments in a request. This would be the largest sized | ||
603 | * scatter list the driver could handle. | ||
604 | **/ | ||
605 | void blk_queue_max_phys_segments(struct request_queue *q, | ||
606 | unsigned short max_segments) | ||
607 | { | ||
608 | if (!max_segments) { | ||
609 | max_segments = 1; | ||
610 | printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); | ||
611 | } | ||
612 | |||
613 | q->max_phys_segments = max_segments; | ||
614 | } | ||
615 | |||
616 | EXPORT_SYMBOL(blk_queue_max_phys_segments); | ||
617 | |||
618 | /** | ||
619 | * blk_queue_max_hw_segments - set max hw segments for a request for this queue | ||
620 | * @q: the request queue for the device | ||
621 | * @max_segments: max number of segments | ||
622 | * | ||
623 | * Description: | ||
624 | * Enables a low level driver to set an upper limit on the number of | ||
625 | * hw data segments in a request. This would be the largest number of | ||
626 | * address/length pairs the host adapter can actually give at once | ||
627 | * to the device. | ||
628 | **/ | ||
629 | void blk_queue_max_hw_segments(struct request_queue *q, | ||
630 | unsigned short max_segments) | ||
631 | { | ||
632 | if (!max_segments) { | ||
633 | max_segments = 1; | ||
634 | printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); | ||
635 | } | ||
636 | |||
637 | q->max_hw_segments = max_segments; | ||
638 | } | ||
639 | |||
640 | EXPORT_SYMBOL(blk_queue_max_hw_segments); | ||
641 | |||
642 | /** | ||
643 | * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg | ||
644 | * @q: the request queue for the device | ||
645 | * @max_size: max size of segment in bytes | ||
646 | * | ||
647 | * Description: | ||
648 | * Enables a low level driver to set an upper limit on the size of a | ||
649 | * coalesced segment | ||
650 | **/ | ||
651 | void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) | ||
652 | { | ||
653 | if (max_size < PAGE_CACHE_SIZE) { | ||
654 | max_size = PAGE_CACHE_SIZE; | ||
655 | printk("%s: set to minimum %d\n", __FUNCTION__, max_size); | ||
656 | } | ||
657 | |||
658 | q->max_segment_size = max_size; | ||
659 | } | ||
660 | |||
661 | EXPORT_SYMBOL(blk_queue_max_segment_size); | ||
662 | |||
663 | /** | ||
664 | * blk_queue_hardsect_size - set hardware sector size for the queue | ||
665 | * @q: the request queue for the device | ||
666 | * @size: the hardware sector size, in bytes | ||
667 | * | ||
668 | * Description: | ||
669 | * This should typically be set to the lowest possible sector size | ||
670 | * that the hardware can operate on (possible without reverting to | ||
671 | * even internal read-modify-write operations). Usually the default | ||
672 | * of 512 covers most hardware. | ||
673 | **/ | ||
674 | void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) | ||
675 | { | ||
676 | q->hardsect_size = size; | ||
677 | } | ||
678 | |||
679 | EXPORT_SYMBOL(blk_queue_hardsect_size); | ||
680 | |||
681 | /* | ||
682 | * Returns the minimum that is _not_ zero, unless both are zero. | ||
683 | */ | ||
684 | #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) | ||
685 | |||
686 | /** | ||
687 | * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers | ||
688 | * @t: the stacking driver (top) | ||
689 | * @b: the underlying device (bottom) | ||
690 | **/ | ||
691 | void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | ||
692 | { | ||
693 | /* zero is "infinity" */ | ||
694 | t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); | ||
695 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors); | ||
696 | |||
697 | t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments); | ||
698 | t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); | ||
699 | t->max_segment_size = min(t->max_segment_size,b->max_segment_size); | ||
700 | t->hardsect_size = max(t->hardsect_size,b->hardsect_size); | ||
701 | if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) | ||
702 | clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); | ||
703 | } | ||
704 | |||
705 | EXPORT_SYMBOL(blk_queue_stack_limits); | ||
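For the stacking case, the top-level driver folds each underlying device's queue into its own limits as members are added; a hedged one-liner in the spirit of what dm/md do, with my_stack_add_member invented for illustration:

        #include <linux/blkdev.h>

        /* Tighten the stacked queue's limits to what this member can handle. */
        static void my_stack_add_member(struct request_queue *top,
                                        struct block_device *member)
        {
                blk_queue_stack_limits(top, bdev_get_queue(member));
        }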
706 | |||
707 | /** | ||
708 | * blk_queue_dma_drain - Set up a drain buffer for excess dma. | ||
709 | * | ||
710 | * @q: the request queue for the device | ||
711 | * @buf: physically contiguous buffer | ||
712 | * @size: size of the buffer in bytes | ||
713 | * | ||
714 | * Some devices have excess DMA problems and can't simply discard (or | ||
715 | * zero fill) the unwanted piece of the transfer. They have to have a | ||
716 | * real area of memory to transfer it into. The use case for this is | ||
717 | * ATAPI devices in DMA mode. If the packet command causes a transfer | ||
718 | * bigger than the transfer size some HBAs will lock up if there | ||
719 | * aren't DMA elements to contain the excess transfer. What this API | ||
720 | * does is adjust the queue so that the buf is always appended | ||
721 | * silently to the scatterlist. | ||
722 | * | ||
723 | * Note: This routine adjusts max_hw_segments to make room for | ||
724 | * appending the drain buffer. If you call | ||
725 | * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after | ||
726 | * calling this routine, you must set the limit to one fewer than your | ||
727 | * device can support otherwise there won't be room for the drain | ||
728 | * buffer. | ||
729 | */ | ||
730 | int blk_queue_dma_drain(struct request_queue *q, void *buf, | ||
731 | unsigned int size) | ||
732 | { | ||
733 | if (q->max_hw_segments < 2 || q->max_phys_segments < 2) | ||
734 | return -EINVAL; | ||
735 | /* make room for appending the drain */ | ||
736 | --q->max_hw_segments; | ||
737 | --q->max_phys_segments; | ||
738 | q->dma_drain_buffer = buf; | ||
739 | q->dma_drain_size = size; | ||
740 | |||
741 | return 0; | ||
742 | } | ||
743 | |||
744 | EXPORT_SYMBOL_GPL(blk_queue_dma_drain); | ||
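A hedged sketch of the ATAPI-style use case from the note above; the buffer size is arbitrary, and keeping one segment slot free is the caller's job exactly as the comment warns:

        #include <linux/blkdev.h>
        #include <linux/slab.h>

        #define MY_DRAIN_SIZE   (16 * 1024)     /* illustrative size only */

        static int my_atapi_alloc_drain(struct request_queue *q)
        {
                void *buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);

                if (!buf)
                        return -ENOMEM;
                /* Consumes one hw and one phys segment, so the driver must
                 * have set its segment limits with one slot to spare. */
                return blk_queue_dma_drain(q, buf, MY_DRAIN_SIZE);
        }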
745 | |||
746 | /** | ||
747 | * blk_queue_segment_boundary - set boundary rules for segment merging | ||
748 | * @q: the request queue for the device | ||
749 | * @mask: the memory boundary mask | ||
750 | **/ | ||
751 | void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) | ||
752 | { | ||
753 | if (mask < PAGE_CACHE_SIZE - 1) { | ||
754 | mask = PAGE_CACHE_SIZE - 1; | ||
755 | printk("%s: set to minimum %lx\n", __FUNCTION__, mask); | ||
756 | } | ||
757 | |||
758 | q->seg_boundary_mask = mask; | ||
759 | } | ||
760 | |||
761 | EXPORT_SYMBOL(blk_queue_segment_boundary); | ||
762 | |||
763 | /** | ||
764 | * blk_queue_dma_alignment - set dma length and memory alignment | ||
765 | * @q: the request queue for the device | ||
766 | * @mask: alignment mask | ||
767 | * | ||
768 | * description: | ||
769 | * set required memory and length alignment for direct dma transactions. | ||
770 | * this is used when building direct io requests for the queue. | ||
771 | * | ||
772 | **/ | ||
773 | void blk_queue_dma_alignment(struct request_queue *q, int mask) | ||
774 | { | ||
775 | q->dma_alignment = mask; | ||
776 | } | ||
777 | |||
778 | EXPORT_SYMBOL(blk_queue_dma_alignment); | ||
779 | |||
780 | /** | ||
781 | * blk_queue_update_dma_alignment - update dma length and memory alignment | ||
782 | * @q: the request queue for the device | ||
783 | * @mask: alignment mask | ||
784 | * | ||
785 | * description: | ||
786 | * update required memory and length alignment for direct dma transactions. | ||
787 | * If the requested alignment is larger than the current alignment, then | ||
788 | * the current queue alignment is updated to the new value, otherwise it | ||
789 | * is left alone. The design of this is to allow multiple objects | ||
790 | * (driver, device, transport etc) to set their respective | ||
791 | * alignments without having them interfere. | ||
792 | * | ||
793 | **/ | ||
794 | void blk_queue_update_dma_alignment(struct request_queue *q, int mask) | ||
795 | { | ||
796 | BUG_ON(mask > PAGE_SIZE); | ||
797 | |||
798 | if (mask > q->dma_alignment) | ||
799 | q->dma_alignment = mask; | ||
800 | } | ||
801 | |||
802 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); | ||
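A worked illustration of the "only ever grows" behaviour described above, with the masks invented for the example:

        #include <linux/blkdev.h>

        static void my_alignment_example(struct request_queue *q)
        {
                blk_queue_dma_alignment(q, 511);          /* driver default: 512-byte alignment */
                blk_queue_update_dma_alignment(q, 3);     /* transport asks for 4 bytes: ignored */
                blk_queue_update_dma_alignment(q, 4095);  /* device needs 4k: becomes the new mask */
        }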
803 | |||
804 | void blk_dump_rq_flags(struct request *rq, char *msg) | 148 | void blk_dump_rq_flags(struct request *rq, char *msg) |
805 | { | 149 | { |
806 | int bit; | 150 | int bit; |
@@ -1074,8 +418,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
1074 | return 1; | 418 | return 1; |
1075 | } | 419 | } |
1076 | 420 | ||
1077 | static int ll_back_merge_fn(struct request_queue *q, struct request *req, | 421 | int ll_back_merge_fn(struct request_queue *q, struct request *req, |
1078 | struct bio *bio) | 422 | struct bio *bio) |
1079 | { | 423 | { |
1080 | unsigned short max_sectors; | 424 | unsigned short max_sectors; |
1081 | int len; | 425 | int len; |
@@ -1285,7 +629,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
1285 | blk_unplug(q); | 629 | blk_unplug(q); |
1286 | } | 630 | } |
1287 | 631 | ||
1288 | static void blk_unplug_work(struct work_struct *work) | 632 | void blk_unplug_work(struct work_struct *work) |
1289 | { | 633 | { |
1290 | struct request_queue *q = | 634 | struct request_queue *q = |
1291 | container_of(work, struct request_queue, unplug_work); | 635 | container_of(work, struct request_queue, unplug_work); |
@@ -1296,7 +640,7 @@ static void blk_unplug_timeout(unsigned long data)
1296 | q->unplug_fn(q); | 640 | q->unplug_fn(q); |
1297 | } | 641 | } |
1298 | 642 | ||
1299 | static void blk_unplug_timeout(unsigned long data) | 643 | void blk_unplug_timeout(unsigned long data) |
1300 | { | 644 | { |
1301 | struct request_queue *q = (struct request_queue *)data; | 645 | struct request_queue *q = (struct request_queue *)data; |
1302 | 646 | ||
@@ -1961,393 +1305,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
1961 | 1305 | ||
1962 | EXPORT_SYMBOL(blk_insert_request); | 1306 | EXPORT_SYMBOL(blk_insert_request); |
1963 | 1307 | ||
1964 | static int __blk_rq_unmap_user(struct bio *bio) | ||
1965 | { | ||
1966 | int ret = 0; | ||
1967 | |||
1968 | if (bio) { | ||
1969 | if (bio_flagged(bio, BIO_USER_MAPPED)) | ||
1970 | bio_unmap_user(bio); | ||
1971 | else | ||
1972 | ret = bio_uncopy_user(bio); | ||
1973 | } | ||
1974 | |||
1975 | return ret; | ||
1976 | } | ||
1977 | |||
1978 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, | ||
1979 | struct bio *bio) | ||
1980 | { | ||
1981 | if (!rq->bio) | ||
1982 | blk_rq_bio_prep(q, rq, bio); | ||
1983 | else if (!ll_back_merge_fn(q, rq, bio)) | ||
1984 | return -EINVAL; | ||
1985 | else { | ||
1986 | rq->biotail->bi_next = bio; | ||
1987 | rq->biotail = bio; | ||
1988 | |||
1989 | rq->data_len += bio->bi_size; | ||
1990 | } | ||
1991 | return 0; | ||
1992 | } | ||
1993 | EXPORT_SYMBOL(blk_rq_append_bio); | ||
1994 | |||
1995 | static int __blk_rq_map_user(struct request_queue *q, struct request *rq, | ||
1996 | void __user *ubuf, unsigned int len) | ||
1997 | { | ||
1998 | unsigned long uaddr; | ||
1999 | struct bio *bio, *orig_bio; | ||
2000 | int reading, ret; | ||
2001 | |||
2002 | reading = rq_data_dir(rq) == READ; | ||
2003 | |||
2004 | /* | ||
2005 | * if alignment requirement is satisfied, map in user pages for | ||
2006 | * direct dma. else, set up kernel bounce buffers | ||
2007 | */ | ||
2008 | uaddr = (unsigned long) ubuf; | ||
2009 | if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) | ||
2010 | bio = bio_map_user(q, NULL, uaddr, len, reading); | ||
2011 | else | ||
2012 | bio = bio_copy_user(q, uaddr, len, reading); | ||
2013 | |||
2014 | if (IS_ERR(bio)) | ||
2015 | return PTR_ERR(bio); | ||
2016 | |||
2017 | orig_bio = bio; | ||
2018 | blk_queue_bounce(q, &bio); | ||
2019 | |||
2020 | /* | ||
2021 | * We link the bounce buffer in and could have to traverse it | ||
2022 | * later so we have to get a ref to prevent it from being freed | ||
2023 | */ | ||
2024 | bio_get(bio); | ||
2025 | |||
2026 | ret = blk_rq_append_bio(q, rq, bio); | ||
2027 | if (!ret) | ||
2028 | return bio->bi_size; | ||
2029 | |||
2030 | /* if it was bounced we must call the end io function */ | ||
2031 | bio_endio(bio, 0); | ||
2032 | __blk_rq_unmap_user(orig_bio); | ||
2033 | bio_put(bio); | ||
2034 | return ret; | ||
2035 | } | ||
2036 | |||
2037 | /** | ||
2038 | * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage | ||
2039 | * @q: request queue where request should be inserted | ||
2040 | * @rq: request structure to fill | ||
2041 | * @ubuf: the user buffer | ||
2042 | * @len: length of user data | ||
2043 | * | ||
2044 | * Description: | ||
2045 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
2046 | * a kernel bounce buffer is used. | ||
2047 | * | ||
2048 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
2049 | * still in process context. | ||
2050 | * | ||
2051 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
2052 | * before being submitted to the device, as pages mapped may be out of | ||
2053 | * reach. It's the caller's responsibility to make sure this happens. The | ||
2054 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
2055 | * unmapping. | ||
2056 | */ | ||
2057 | int blk_rq_map_user(struct request_queue *q, struct request *rq, | ||
2058 | void __user *ubuf, unsigned long len) | ||
2059 | { | ||
2060 | unsigned long bytes_read = 0; | ||
2061 | struct bio *bio = NULL; | ||
2062 | int ret; | ||
2063 | |||
2064 | if (len > (q->max_hw_sectors << 9)) | ||
2065 | return -EINVAL; | ||
2066 | if (!len || !ubuf) | ||
2067 | return -EINVAL; | ||
2068 | |||
2069 | while (bytes_read != len) { | ||
2070 | unsigned long map_len, end, start; | ||
2071 | |||
2072 | map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE); | ||
2073 | end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1) | ||
2074 | >> PAGE_SHIFT; | ||
2075 | start = (unsigned long)ubuf >> PAGE_SHIFT; | ||
2076 | |||
2077 | /* | ||
2078 | * A bad offset could cause us to require BIO_MAX_PAGES + 1 | ||
2079 | * pages. If this happens we just lower the requested | ||
2080 | * mapping len by a page so that we can fit | ||
2081 | */ | ||
2082 | if (end - start > BIO_MAX_PAGES) | ||
2083 | map_len -= PAGE_SIZE; | ||
2084 | |||
2085 | ret = __blk_rq_map_user(q, rq, ubuf, map_len); | ||
2086 | if (ret < 0) | ||
2087 | goto unmap_rq; | ||
2088 | if (!bio) | ||
2089 | bio = rq->bio; | ||
2090 | bytes_read += ret; | ||
2091 | ubuf += ret; | ||
2092 | } | ||
2093 | |||
2094 | rq->buffer = rq->data = NULL; | ||
2095 | return 0; | ||
2096 | unmap_rq: | ||
2097 | blk_rq_unmap_user(bio); | ||
2098 | return ret; | ||
2099 | } | ||
2100 | |||
2101 | EXPORT_SYMBOL(blk_rq_map_user); | ||
2102 | |||
2103 | /** | ||
2104 | * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage | ||
2105 | * @q: request queue where request should be inserted | ||
2106 | * @rq: request to map data to | ||
2107 | * @iov: pointer to the iovec | ||
2108 | * @iov_count: number of elements in the iovec | ||
2109 | * @len: I/O byte count | ||
2110 | * | ||
2111 | * Description: | ||
2112 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
2113 | * a kernel bounce buffer is used. | ||
2114 | * | ||
2115 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
2116 | * still in process context. | ||
2117 | * | ||
2118 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
2119 | * before being submitted to the device, as pages mapped may be out of | ||
2120 | * reach. It's the caller's responsibility to make sure this happens. The | ||
2121 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
2122 | * unmapping. | ||
2123 | */ | ||
2124 | int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | ||
2125 | struct sg_iovec *iov, int iov_count, unsigned int len) | ||
2126 | { | ||
2127 | struct bio *bio; | ||
2128 | |||
2129 | if (!iov || iov_count <= 0) | ||
2130 | return -EINVAL; | ||
2131 | |||
2132 | /* we don't allow misaligned data like bio_map_user() does. If the | ||
2133 | * user is using sg, they're expected to know the alignment constraints | ||
2134 | * and respect them accordingly */ | ||
2135 | bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ); | ||
2136 | if (IS_ERR(bio)) | ||
2137 | return PTR_ERR(bio); | ||
2138 | |||
2139 | if (bio->bi_size != len) { | ||
2140 | bio_endio(bio, 0); | ||
2141 | bio_unmap_user(bio); | ||
2142 | return -EINVAL; | ||
2143 | } | ||
2144 | |||
2145 | bio_get(bio); | ||
2146 | blk_rq_bio_prep(q, rq, bio); | ||
2147 | rq->buffer = rq->data = NULL; | ||
2148 | return 0; | ||
2149 | } | ||
2150 | |||
2151 | EXPORT_SYMBOL(blk_rq_map_user_iov); | ||
2152 | |||
2153 | /** | ||
2154 | * blk_rq_unmap_user - unmap a request with user data | ||
2155 | * @bio: start of bio list | ||
2156 | * | ||
2157 | * Description: | ||
2158 | * Unmap a rq previously mapped by blk_rq_map_user(). The caller must | ||
2159 | * supply the original rq->bio from the blk_rq_map_user() return, since | ||
2160 | * the io completion may have changed rq->bio. | ||
2161 | */ | ||
2162 | int blk_rq_unmap_user(struct bio *bio) | ||
2163 | { | ||
2164 | struct bio *mapped_bio; | ||
2165 | int ret = 0, ret2; | ||
2166 | |||
2167 | while (bio) { | ||
2168 | mapped_bio = bio; | ||
2169 | if (unlikely(bio_flagged(bio, BIO_BOUNCED))) | ||
2170 | mapped_bio = bio->bi_private; | ||
2171 | |||
2172 | ret2 = __blk_rq_unmap_user(mapped_bio); | ||
2173 | if (ret2 && !ret) | ||
2174 | ret = ret2; | ||
2175 | |||
2176 | mapped_bio = bio; | ||
2177 | bio = bio->bi_next; | ||
2178 | bio_put(mapped_bio); | ||
2179 | } | ||
2180 | |||
2181 | return ret; | ||
2182 | } | ||
2183 | |||
2184 | EXPORT_SYMBOL(blk_rq_unmap_user); | ||
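Putting the map and unmap halves together, the pattern for a passthrough (REQ_BLOCK_PC) command looks roughly like the SG_IO ioctl path. A hedged sketch, with my_pc_io invented for illustration; a real caller would also fill rq->cmd[] before executing:

        #include <linux/blkdev.h>

        static int my_pc_io(struct request_queue *q, struct gendisk *disk,
                            void __user *ubuf, unsigned long len, int write)
        {
                struct request *rq;
                struct bio *bio;
                int err;

                rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
                if (!rq)
                        return -ENOMEM;
                rq->cmd_type = REQ_TYPE_BLOCK_PC;

                err = blk_rq_map_user(q, rq, ubuf, len);
                if (err)
                        goto out_put;

                bio = rq->bio;                          /* keep the original bio for unmap */
                err = blk_execute_rq(q, disk, rq, 0);   /* synchronous execution */

                if (blk_rq_unmap_user(bio) && !err)
                        err = -EFAULT;
        out_put:
                blk_put_request(rq);
                return err;
        }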
2185 | |||
2186 | /** | ||
2187 | * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage | ||
2188 | * @q: request queue where request should be inserted | ||
2189 | * @rq: request to fill | ||
2190 | * @kbuf: the kernel buffer | ||
2191 | * @len: length of user data | ||
2192 | * @gfp_mask: memory allocation flags | ||
2193 | */ | ||
2194 | int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, | ||
2195 | unsigned int len, gfp_t gfp_mask) | ||
2196 | { | ||
2197 | struct bio *bio; | ||
2198 | |||
2199 | if (len > (q->max_hw_sectors << 9)) | ||
2200 | return -EINVAL; | ||
2201 | if (!len || !kbuf) | ||
2202 | return -EINVAL; | ||
2203 | |||
2204 | bio = bio_map_kern(q, kbuf, len, gfp_mask); | ||
2205 | if (IS_ERR(bio)) | ||
2206 | return PTR_ERR(bio); | ||
2207 | |||
2208 | if (rq_data_dir(rq) == WRITE) | ||
2209 | bio->bi_rw |= (1 << BIO_RW); | ||
2210 | |||
2211 | blk_rq_bio_prep(q, rq, bio); | ||
2212 | blk_queue_bounce(q, &rq->bio); | ||
2213 | rq->buffer = rq->data = NULL; | ||
2214 | return 0; | ||
2215 | } | ||
2216 | |||
2217 | EXPORT_SYMBOL(blk_rq_map_kern); | ||
2218 | |||
2219 | /** | ||
2220 | * blk_execute_rq_nowait - insert a request into queue for execution | ||
2221 | * @q: queue to insert the request in | ||
2222 | * @bd_disk: matching gendisk | ||
2223 | * @rq: request to insert | ||
2224 | * @at_head: insert request at head or tail of queue | ||
2225 | * @done: I/O completion handler | ||
2226 | * | ||
2227 | * Description: | ||
2228 | * Insert a fully prepared request at the back of the io scheduler queue | ||
2229 | * for execution. Don't wait for completion. | ||
2230 | */ | ||
2231 | void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | ||
2232 | struct request *rq, int at_head, | ||
2233 | rq_end_io_fn *done) | ||
2234 | { | ||
2235 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | ||
2236 | |||
2237 | rq->rq_disk = bd_disk; | ||
2238 | rq->cmd_flags |= REQ_NOMERGE; | ||
2239 | rq->end_io = done; | ||
2240 | WARN_ON(irqs_disabled()); | ||
2241 | spin_lock_irq(q->queue_lock); | ||
2242 | __elv_add_request(q, rq, where, 1); | ||
2243 | __generic_unplug_device(q); | ||
2244 | spin_unlock_irq(q->queue_lock); | ||
2245 | } | ||
2246 | EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); | ||
2247 | |||
2248 | /** | ||
2249 | * blk_execute_rq - insert a request into queue for execution | ||
2250 | * @q: queue to insert the request in | ||
2251 | * @bd_disk: matching gendisk | ||
2252 | * @rq: request to insert | ||
2253 | * @at_head: insert request at head or tail of queue | ||
2254 | * | ||
2255 | * Description: | ||
2256 | * Insert a fully prepared request at the back of the io scheduler queue | ||
2257 | * for execution and wait for completion. | ||
2258 | */ | ||
2259 | int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, | ||
2260 | struct request *rq, int at_head) | ||
2261 | { | ||
2262 | DECLARE_COMPLETION_ONSTACK(wait); | ||
2263 | char sense[SCSI_SENSE_BUFFERSIZE]; | ||
2264 | int err = 0; | ||
2265 | |||
2266 | /* | ||
2267 | * we need an extra reference to the request, so we can look at | ||
2268 | * it after io completion | ||
2269 | */ | ||
2270 | rq->ref_count++; | ||
2271 | |||
2272 | if (!rq->sense) { | ||
2273 | memset(sense, 0, sizeof(sense)); | ||
2274 | rq->sense = sense; | ||
2275 | rq->sense_len = 0; | ||
2276 | } | ||
2277 | |||
2278 | rq->end_io_data = &wait; | ||
2279 | blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); | ||
2280 | wait_for_completion(&wait); | ||
2281 | |||
2282 | if (rq->errors) | ||
2283 | err = -EIO; | ||
2284 | |||
2285 | return err; | ||
2286 | } | ||
2287 | |||
2288 | EXPORT_SYMBOL(blk_execute_rq); | ||
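The _nowait variant above it is for callers that would rather be notified from a callback than sleep in blk_execute_rq(). A hedged sketch with an invented per-command structure; the callback mirrors the put-then-complete ordering of blk_end_sync_rq():

        #include <linux/blkdev.h>
        #include <linux/completion.h>

        struct my_cmd {
                struct completion done;
                int result;
        };

        /* Runs from the request completion path with the queue lock held. */
        static void my_cmd_done(struct request *rq, int error)
        {
                struct my_cmd *cmd = rq->end_io_data;

                cmd->result = error;
                __blk_put_request(rq->q, rq);
                complete(&cmd->done);   /* last: the waiter's stack owns cmd */
        }

        static void my_cmd_submit(struct request_queue *q, struct gendisk *disk,
                                  struct request *rq, struct my_cmd *cmd)
        {
                init_completion(&cmd->done);
                rq->end_io_data = cmd;
                blk_execute_rq_nowait(q, disk, rq, 0, my_cmd_done);
        }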
2289 | |||
2290 | static void bio_end_empty_barrier(struct bio *bio, int err) | ||
2291 | { | ||
2292 | if (err) | ||
2293 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | ||
2294 | |||
2295 | complete(bio->bi_private); | ||
2296 | } | ||
2297 | |||
2298 | /** | ||
2299 | * blkdev_issue_flush - queue a flush | ||
2300 | * @bdev: blockdev to issue flush for | ||
2301 | * @error_sector: error sector | ||
2302 | * | ||
2303 | * Description: | ||
2304 | * Issue a flush for the block device in question. Caller can supply | ||
2305 | * room for storing the error offset in case of a flush error, if they | ||
2306 | * wish to. The flush is submitted and waited for before this returns. | ||
2307 | */ | ||
2308 | int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) | ||
2309 | { | ||
2310 | DECLARE_COMPLETION_ONSTACK(wait); | ||
2311 | struct request_queue *q; | ||
2312 | struct bio *bio; | ||
2313 | int ret; | ||
2314 | |||
2315 | if (bdev->bd_disk == NULL) | ||
2316 | return -ENXIO; | ||
2317 | |||
2318 | q = bdev_get_queue(bdev); | ||
2319 | if (!q) | ||
2320 | return -ENXIO; | ||
2321 | |||
2322 | bio = bio_alloc(GFP_KERNEL, 0); | ||
2323 | if (!bio) | ||
2324 | return -ENOMEM; | ||
2325 | |||
2326 | bio->bi_end_io = bio_end_empty_barrier; | ||
2327 | bio->bi_private = &wait; | ||
2328 | bio->bi_bdev = bdev; | ||
2329 | submit_bio(1 << BIO_RW_BARRIER, bio); | ||
2330 | |||
2331 | wait_for_completion(&wait); | ||
2332 | |||
2333 | /* | ||
2334 | * The driver must store the error location in ->bi_sector, if | ||
2335 | * it supports it. For non-stacked drivers, this should be copied | ||
2336 | * from rq->sector. | ||
2337 | */ | ||
2338 | if (error_sector) | ||
2339 | *error_sector = bio->bi_sector; | ||
2340 | |||
2341 | ret = 0; | ||
2342 | if (!bio_flagged(bio, BIO_UPTODATE)) | ||
2343 | ret = -EIO; | ||
2344 | |||
2345 | bio_put(bio); | ||
2346 | return ret; | ||
2347 | } | ||
2348 | |||
2349 | EXPORT_SYMBOL(blkdev_issue_flush); | ||
2350 | |||
2351 | static void drive_stat_acct(struct request *rq, int new_io) | 1308 | static void drive_stat_acct(struct request *rq, int new_io) |
2352 | { | 1309 | { |
2353 | int rw = rq_data_dir(rq); | 1310 | int rw = rq_data_dir(rq); |
@@ -2459,26 +1416,6 @@ void blk_put_request(struct request *req)
2459 | 1416 | ||
2460 | EXPORT_SYMBOL(blk_put_request); | 1417 | EXPORT_SYMBOL(blk_put_request); |
2461 | 1418 | ||
2462 | /** | ||
2463 | * blk_end_sync_rq - executes a completion event on a request | ||
2464 | * @rq: request to complete | ||
2465 | * @error: end io status of the request | ||
2466 | */ | ||
2467 | void blk_end_sync_rq(struct request *rq, int error) | ||
2468 | { | ||
2469 | struct completion *waiting = rq->end_io_data; | ||
2470 | |||
2471 | rq->end_io_data = NULL; | ||
2472 | __blk_put_request(rq->q, rq); | ||
2473 | |||
2474 | /* | ||
2475 | * complete last, if this is a stack request the process (and thus | ||
2476 | * the rq pointer) could be invalid right after this complete() | ||
2477 | */ | ||
2478 | complete(waiting); | ||
2479 | } | ||
2480 | EXPORT_SYMBOL(blk_end_sync_rq); | ||
2481 | |||
2482 | /* | 1419 | /* |
2483 | * Has to be called with the request spinlock acquired | 1420 | * Has to be called with the request spinlock acquired |
2484 | */ | 1421 | */ |
@@ -2557,7 +1494,7 @@ static inline int attempt_front_merge(struct request_queue *q, | |||
2557 | return 0; | 1494 | return 0; |
2558 | } | 1495 | } |
2559 | 1496 | ||
2560 | static void init_request_from_bio(struct request *req, struct bio *bio) | 1497 | void init_request_from_bio(struct request *req, struct bio *bio) |
2561 | { | 1498 | { |
2562 | req->cmd_type = REQ_TYPE_FS; | 1499 | req->cmd_type = REQ_TYPE_FS; |
2563 | 1500 | ||
@@ -3524,8 +2461,8 @@ int blk_end_request_callback(struct request *rq, int error, int nr_bytes, | |||
3524 | } | 2461 | } |
3525 | EXPORT_SYMBOL_GPL(blk_end_request_callback); | 2462 | EXPORT_SYMBOL_GPL(blk_end_request_callback); |
3526 | 2463 | ||
3527 | static void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | 2464 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, |
3528 | struct bio *bio) | 2465 | struct bio *bio) |
3529 | { | 2466 | { |
3530 | /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ | 2467 | /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ |
3531 | rq->cmd_flags |= (bio->bi_rw & 3); | 2468 | rq->cmd_flags |= (bio->bi_rw & 3); |
@@ -3571,188 +2508,12 @@ int __init blk_dev_init(void) | |||
3571 | blk_requestq_cachep = kmem_cache_create("blkdev_queue", | 2508 | blk_requestq_cachep = kmem_cache_create("blkdev_queue", |
3572 | sizeof(struct request_queue), 0, SLAB_PANIC, NULL); | 2509 | sizeof(struct request_queue), 0, SLAB_PANIC, NULL); |
3573 | 2510 | ||
3574 | iocontext_cachep = kmem_cache_create("blkdev_ioc", | ||
3575 | sizeof(struct io_context), 0, SLAB_PANIC, NULL); | ||
3576 | |||
3577 | for_each_possible_cpu(i) | 2511 | for_each_possible_cpu(i) |
3578 | INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); | 2512 | INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); |
3579 | 2513 | ||
3580 | open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); | 2514 | open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); |
3581 | register_hotcpu_notifier(&blk_cpu_notifier); | 2515 | register_hotcpu_notifier(&blk_cpu_notifier); |
3582 | 2516 | ||
3583 | blk_max_low_pfn = max_low_pfn - 1; | ||
3584 | blk_max_pfn = max_pfn - 1; | ||
3585 | |||
3586 | return 0; | ||
3587 | } | ||
3588 | |||
3589 | static void cfq_dtor(struct io_context *ioc) | ||
3590 | { | ||
3591 | struct cfq_io_context *cic[1]; | ||
3592 | int r; | ||
3593 | |||
3594 | /* | ||
3595 | * We don't have a specific key to lookup with, so use the gang | ||
3596 | * lookup to just retrieve the first item stored. The cfq exit | ||
3597 | * function will iterate the full tree, so any member will do. | ||
3598 | */ | ||
3599 | r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1); | ||
3600 | if (r > 0) | ||
3601 | cic[0]->dtor(ioc); | ||
3602 | } | ||
3603 | |||
3604 | /* | ||
3605 | * IO Context helper functions. put_io_context() returns 1 if there are no | ||
3606 | * more users of this io context, 0 otherwise. | ||
3607 | */ | ||
3608 | int put_io_context(struct io_context *ioc) | ||
3609 | { | ||
3610 | if (ioc == NULL) | ||
3611 | return 1; | ||
3612 | |||
3613 | BUG_ON(atomic_read(&ioc->refcount) == 0); | ||
3614 | |||
3615 | if (atomic_dec_and_test(&ioc->refcount)) { | ||
3616 | rcu_read_lock(); | ||
3617 | if (ioc->aic && ioc->aic->dtor) | ||
3618 | ioc->aic->dtor(ioc->aic); | ||
3619 | rcu_read_unlock(); | ||
3620 | cfq_dtor(ioc); | ||
3621 | |||
3622 | kmem_cache_free(iocontext_cachep, ioc); | ||
3623 | return 1; | ||
3624 | } | ||
3625 | return 0; | 2517 | return 0; |
3626 | } | 2518 | } |
3627 | EXPORT_SYMBOL(put_io_context); | ||
3628 | |||
3629 | static void cfq_exit(struct io_context *ioc) | ||
3630 | { | ||
3631 | struct cfq_io_context *cic[1]; | ||
3632 | int r; | ||
3633 | |||
3634 | rcu_read_lock(); | ||
3635 | /* | ||
3636 | * See comment for cfq_dtor() | ||
3637 | */ | ||
3638 | r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1); | ||
3639 | rcu_read_unlock(); | ||
3640 | |||
3641 | if (r > 0) | ||
3642 | cic[0]->exit(ioc); | ||
3643 | } | ||
3644 | |||
3645 | /* Called by the exiting task */ | ||
3646 | void exit_io_context(void) | ||
3647 | { | ||
3648 | struct io_context *ioc; | ||
3649 | |||
3650 | task_lock(current); | ||
3651 | ioc = current->io_context; | ||
3652 | current->io_context = NULL; | ||
3653 | task_unlock(current); | ||
3654 | |||
3655 | if (atomic_dec_and_test(&ioc->nr_tasks)) { | ||
3656 | if (ioc->aic && ioc->aic->exit) | ||
3657 | ioc->aic->exit(ioc->aic); | ||
3658 | cfq_exit(ioc); | ||
3659 | |||
3660 | put_io_context(ioc); | ||
3661 | } | ||
3662 | } | ||
3663 | |||
3664 | struct io_context *alloc_io_context(gfp_t gfp_flags, int node) | ||
3665 | { | ||
3666 | struct io_context *ret; | ||
3667 | |||
3668 | ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); | ||
3669 | if (ret) { | ||
3670 | atomic_set(&ret->refcount, 1); | ||
3671 | atomic_set(&ret->nr_tasks, 1); | ||
3672 | spin_lock_init(&ret->lock); | ||
3673 | ret->ioprio_changed = 0; | ||
3674 | ret->ioprio = 0; | ||
3675 | ret->last_waited = jiffies; /* doesn't matter... */ | ||
3676 | ret->nr_batch_requests = 0; /* because this is 0 */ | ||
3677 | ret->aic = NULL; | ||
3678 | INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); | ||
3679 | ret->ioc_data = NULL; | ||
3680 | } | ||
3681 | |||
3682 | return ret; | ||
3683 | } | ||
3684 | |||
3685 | /* | ||
3686 | * If the current task has no IO context then create one and initialise it. | ||
3687 | * Otherwise, return its existing IO context. | ||
3688 | * | ||
3689 | * This returned IO context doesn't have a specifically elevated refcount, | ||
3690 | * but since the current task itself holds a reference, the context can be | ||
3691 | * used in general code, so long as it stays within `current` context. | ||
3692 | */ | ||
3693 | static struct io_context *current_io_context(gfp_t gfp_flags, int node) | ||
3694 | { | ||
3695 | struct task_struct *tsk = current; | ||
3696 | struct io_context *ret; | ||
3697 | |||
3698 | ret = tsk->io_context; | ||
3699 | if (likely(ret)) | ||
3700 | return ret; | ||
3701 | |||
3702 | ret = alloc_io_context(gfp_flags, node); | ||
3703 | if (ret) { | ||
3704 | /* make sure set_task_ioprio() sees the settings above */ | ||
3705 | smp_wmb(); | ||
3706 | tsk->io_context = ret; | ||
3707 | } | ||
3708 | |||
3709 | return ret; | ||
3710 | } | ||
3711 | |||
3712 | /* | ||
3713 | * If the current task has no IO context then create one and initialise it. | ||
3714 | * If it does have a context, take a ref on it. | ||
3715 | * | ||
3716 | * This is always called in the context of the task which submitted the I/O. | ||
3717 | */ | ||
3718 | struct io_context *get_io_context(gfp_t gfp_flags, int node) | ||
3719 | { | ||
3720 | struct io_context *ret = NULL; | ||
3721 | |||
3722 | /* | ||
3723 | * Check for unlikely race with exiting task. ioc ref count is | ||
3724 | * zero when ioc is being detached. | ||
3725 | */ | ||
3726 | do { | ||
3727 | ret = current_io_context(gfp_flags, node); | ||
3728 | if (unlikely(!ret)) | ||
3729 | break; | ||
3730 | } while (!atomic_inc_not_zero(&ret->refcount)); | ||
3731 | |||
3732 | return ret; | ||
3733 | } | ||
3734 | EXPORT_SYMBOL(get_io_context); | ||
3735 | |||
3736 | void copy_io_context(struct io_context **pdst, struct io_context **psrc) | ||
3737 | { | ||
3738 | struct io_context *src = *psrc; | ||
3739 | struct io_context *dst = *pdst; | ||
3740 | |||
3741 | if (src) { | ||
3742 | BUG_ON(atomic_read(&src->refcount) == 0); | ||
3743 | atomic_inc(&src->refcount); | ||
3744 | put_io_context(dst); | ||
3745 | *pdst = src; | ||
3746 | } | ||
3747 | } | ||
3748 | EXPORT_SYMBOL(copy_io_context); | ||
3749 | |||
3750 | void swap_io_context(struct io_context **ioc1, struct io_context **ioc2) | ||
3751 | { | ||
3752 | struct io_context *temp; | ||
3753 | temp = *ioc1; | ||
3754 | *ioc1 = *ioc2; | ||
3755 | *ioc2 = temp; | ||
3756 | } | ||
3757 | EXPORT_SYMBOL(swap_io_context); | ||
3758 | 2519 | ||
diff --git a/block/blk-exec.c b/block/blk-exec.c new file mode 100644 index 000000000000..ebfb44e959a9 --- /dev/null +++ b/block/blk-exec.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * Functions related to executing requests on a block device queue | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/bio.h> | ||
7 | #include <linux/blkdev.h> | ||
8 | |||
9 | #include "blk.h" | ||
10 | |||
11 | /* | ||
12 | * for max sense size | ||
13 | */ | ||
14 | #include <scsi/scsi_cmnd.h> | ||
15 | |||
16 | /** | ||
17 | * blk_end_sync_rq - executes a completion event on a request | ||
18 | * @rq: request to complete | ||
19 | * @error: end io status of the request | ||
20 | */ | ||
21 | void blk_end_sync_rq(struct request *rq, int error) | ||
22 | { | ||
23 | struct completion *waiting = rq->end_io_data; | ||
24 | |||
25 | rq->end_io_data = NULL; | ||
26 | __blk_put_request(rq->q, rq); | ||
27 | |||
28 | /* | ||
29 | * complete last, if this is a stack request the process (and thus | ||
30 | * the rq pointer) could be invalid right after this complete() | ||
31 | */ | ||
32 | complete(waiting); | ||
33 | } | ||
34 | EXPORT_SYMBOL(blk_end_sync_rq); | ||
35 | |||
36 | /** | ||
37 | * blk_execute_rq_nowait - insert a request into queue for execution | ||
38 | * @q: queue to insert the request in | ||
39 | * @bd_disk: matching gendisk | ||
40 | * @rq: request to insert | ||
41 | * @at_head: insert request at head or tail of queue | ||
42 | * @done: I/O completion handler | ||
43 | * | ||
44 | * Description: | ||
45 | * Insert a fully prepared request at the back of the io scheduler queue | ||
46 | * for execution. Don't wait for completion. | ||
47 | */ | ||
48 | void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | ||
49 | struct request *rq, int at_head, | ||
50 | rq_end_io_fn *done) | ||
51 | { | ||
52 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | ||
53 | |||
54 | rq->rq_disk = bd_disk; | ||
55 | rq->cmd_flags |= REQ_NOMERGE; | ||
56 | rq->end_io = done; | ||
57 | WARN_ON(irqs_disabled()); | ||
58 | spin_lock_irq(q->queue_lock); | ||
59 | __elv_add_request(q, rq, where, 1); | ||
60 | __generic_unplug_device(q); | ||
61 | spin_unlock_irq(q->queue_lock); | ||
62 | } | ||
63 | EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); | ||
64 | |||
65 | /** | ||
66 | * blk_execute_rq - insert a request into queue for execution | ||
67 | * @q: queue to insert the request in | ||
68 | * @bd_disk: matching gendisk | ||
69 | * @rq: request to insert | ||
70 | * @at_head: insert request at head or tail of queue | ||
71 | * | ||
72 | * Description: | ||
73 | * Insert a fully prepared request at the back of the io scheduler queue | ||
74 | * for execution and wait for completion. | ||
75 | */ | ||
76 | int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, | ||
77 | struct request *rq, int at_head) | ||
78 | { | ||
79 | DECLARE_COMPLETION_ONSTACK(wait); | ||
80 | char sense[SCSI_SENSE_BUFFERSIZE]; | ||
81 | int err = 0; | ||
82 | |||
83 | /* | ||
84 | * we need an extra reference to the request, so we can look at | ||
85 | * it after io completion | ||
86 | */ | ||
87 | rq->ref_count++; | ||
88 | |||
89 | if (!rq->sense) { | ||
90 | memset(sense, 0, sizeof(sense)); | ||
91 | rq->sense = sense; | ||
92 | rq->sense_len = 0; | ||
93 | } | ||
94 | |||
95 | rq->end_io_data = &wait; | ||
96 | blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); | ||
97 | wait_for_completion(&wait); | ||
98 | |||
99 | if (rq->errors) | ||
100 | err = -EIO; | ||
101 | |||
102 | return err; | ||
103 | } | ||
104 | |||
105 | EXPORT_SYMBOL(blk_execute_rq); | ||
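To show how the relocated execution helpers are typically used (illustrative sketch, not part of this patch), the code below sends a SCSI TEST UNIT READY as a synchronous REQ_TYPE_BLOCK_PC request; example_test_unit_ready() is a hypothetical driver helper.

	/* hypothetical synchronous BLOCK_PC submission via blk_execute_rq() */
	static int example_test_unit_ready(struct request_queue *q,
					   struct gendisk *disk)
	{
		struct request *rq;
		int err;

		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;

		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		memset(rq->cmd, 0, sizeof(rq->cmd));
		rq->cmd[0] = 0x00;		/* TEST UNIT READY */
		rq->cmd_len = 6;
		rq->timeout = 30 * HZ;

		/* blk_end_sync_rq() is used as the completion, so this blocks */
		err = blk_execute_rq(q, disk, rq, 0);

		blk_put_request(rq);
		return err;
	}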
diff --git a/block/blk-ioc.c b/block/blk-ioc.c new file mode 100644 index 000000000000..6d1675508eb5 --- /dev/null +++ b/block/blk-ioc.c | |||
@@ -0,0 +1,194 @@ | |||
1 | /* | ||
2 | * Functions related to io context handling | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/bio.h> | ||
8 | #include <linux/blkdev.h> | ||
9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | ||
10 | |||
11 | #include "blk.h" | ||
12 | |||
13 | /* | ||
14 | * For io context allocations | ||
15 | */ | ||
16 | static struct kmem_cache *iocontext_cachep; | ||
17 | |||
18 | static void cfq_dtor(struct io_context *ioc) | ||
19 | { | ||
20 | struct cfq_io_context *cic[1]; | ||
21 | int r; | ||
22 | |||
23 | /* | ||
24 | * We don't have a specific key to lookup with, so use the gang | ||
25 | * lookup to just retrieve the first item stored. The cfq exit | ||
26 | * function will iterate the full tree, so any member will do. | ||
27 | */ | ||
28 | r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1); | ||
29 | if (r > 0) | ||
30 | cic[0]->dtor(ioc); | ||
31 | } | ||
32 | |||
33 | /* | ||
34 | * IO Context helper functions. put_io_context() returns 1 if there are no | ||
35 | * more users of this io context, 0 otherwise. | ||
36 | */ | ||
37 | int put_io_context(struct io_context *ioc) | ||
38 | { | ||
39 | if (ioc == NULL) | ||
40 | return 1; | ||
41 | |||
42 | BUG_ON(atomic_read(&ioc->refcount) == 0); | ||
43 | |||
44 | if (atomic_dec_and_test(&ioc->refcount)) { | ||
45 | rcu_read_lock(); | ||
46 | if (ioc->aic && ioc->aic->dtor) | ||
47 | ioc->aic->dtor(ioc->aic); | ||
48 | rcu_read_unlock(); | ||
49 | cfq_dtor(ioc); | ||
50 | |||
51 | kmem_cache_free(iocontext_cachep, ioc); | ||
52 | return 1; | ||
53 | } | ||
54 | return 0; | ||
55 | } | ||
56 | EXPORT_SYMBOL(put_io_context); | ||
57 | |||
58 | static void cfq_exit(struct io_context *ioc) | ||
59 | { | ||
60 | struct cfq_io_context *cic[1]; | ||
61 | int r; | ||
62 | |||
63 | rcu_read_lock(); | ||
64 | /* | ||
65 | * See comment for cfq_dtor() | ||
66 | */ | ||
67 | r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1); | ||
68 | rcu_read_unlock(); | ||
69 | |||
70 | if (r > 0) | ||
71 | cic[0]->exit(ioc); | ||
72 | } | ||
73 | |||
74 | /* Called by the exiting task */ | ||
75 | void exit_io_context(void) | ||
76 | { | ||
77 | struct io_context *ioc; | ||
78 | |||
79 | task_lock(current); | ||
80 | ioc = current->io_context; | ||
81 | current->io_context = NULL; | ||
82 | task_unlock(current); | ||
83 | |||
84 | if (atomic_dec_and_test(&ioc->nr_tasks)) { | ||
85 | if (ioc->aic && ioc->aic->exit) | ||
86 | ioc->aic->exit(ioc->aic); | ||
87 | cfq_exit(ioc); | ||
88 | |||
89 | put_io_context(ioc); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | struct io_context *alloc_io_context(gfp_t gfp_flags, int node) | ||
94 | { | ||
95 | struct io_context *ret; | ||
96 | |||
97 | ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); | ||
98 | if (ret) { | ||
99 | atomic_set(&ret->refcount, 1); | ||
100 | atomic_set(&ret->nr_tasks, 1); | ||
101 | spin_lock_init(&ret->lock); | ||
102 | ret->ioprio_changed = 0; | ||
103 | ret->ioprio = 0; | ||
104 | ret->last_waited = jiffies; /* doesn't matter... */ | ||
105 | ret->nr_batch_requests = 0; /* because this is 0 */ | ||
106 | ret->aic = NULL; | ||
107 | INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); | ||
108 | ret->ioc_data = NULL; | ||
109 | } | ||
110 | |||
111 | return ret; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * If the current task has no IO context then create one and initialise it. | ||
116 | * Otherwise, return its existing IO context. | ||
117 | * | ||
118 | * This returned IO context doesn't have a specifically elevated refcount, | ||
119 | * but since the current task itself holds a reference, the context can be | ||
120 | * used in general code, so long as it stays within `current` context. | ||
121 | */ | ||
122 | struct io_context *current_io_context(gfp_t gfp_flags, int node) | ||
123 | { | ||
124 | struct task_struct *tsk = current; | ||
125 | struct io_context *ret; | ||
126 | |||
127 | ret = tsk->io_context; | ||
128 | if (likely(ret)) | ||
129 | return ret; | ||
130 | |||
131 | ret = alloc_io_context(gfp_flags, node); | ||
132 | if (ret) { | ||
133 | /* make sure set_task_ioprio() sees the settings above */ | ||
134 | smp_wmb(); | ||
135 | tsk->io_context = ret; | ||
136 | } | ||
137 | |||
138 | return ret; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * If the current task has no IO context then create one and initialise it. | ||
143 | * If it does have a context, take a ref on it. | ||
144 | * | ||
145 | * This is always called in the context of the task which submitted the I/O. | ||
146 | */ | ||
147 | struct io_context *get_io_context(gfp_t gfp_flags, int node) | ||
148 | { | ||
149 | struct io_context *ret = NULL; | ||
150 | |||
151 | /* | ||
152 | * Check for unlikely race with exiting task. ioc ref count is | ||
153 | * zero when ioc is being detached. | ||
154 | */ | ||
155 | do { | ||
156 | ret = current_io_context(gfp_flags, node); | ||
157 | if (unlikely(!ret)) | ||
158 | break; | ||
159 | } while (!atomic_inc_not_zero(&ret->refcount)); | ||
160 | |||
161 | return ret; | ||
162 | } | ||
163 | EXPORT_SYMBOL(get_io_context); | ||
164 | |||
165 | void copy_io_context(struct io_context **pdst, struct io_context **psrc) | ||
166 | { | ||
167 | struct io_context *src = *psrc; | ||
168 | struct io_context *dst = *pdst; | ||
169 | |||
170 | if (src) { | ||
171 | BUG_ON(atomic_read(&src->refcount) == 0); | ||
172 | atomic_inc(&src->refcount); | ||
173 | put_io_context(dst); | ||
174 | *pdst = src; | ||
175 | } | ||
176 | } | ||
177 | EXPORT_SYMBOL(copy_io_context); | ||
178 | |||
179 | void swap_io_context(struct io_context **ioc1, struct io_context **ioc2) | ||
180 | { | ||
181 | struct io_context *temp; | ||
182 | temp = *ioc1; | ||
183 | *ioc1 = *ioc2; | ||
184 | *ioc2 = temp; | ||
185 | } | ||
186 | EXPORT_SYMBOL(swap_io_context); | ||
187 | |||
188 | int __init blk_ioc_init(void) | ||
189 | { | ||
190 | iocontext_cachep = kmem_cache_create("blkdev_ioc", | ||
191 | sizeof(struct io_context), 0, SLAB_PANIC, NULL); | ||
192 | return 0; | ||
193 | } | ||
194 | subsys_initcall(blk_ioc_init); | ||
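For orientation (not part of this patch), an I/O scheduler or block driver would typically use these helpers as in the hypothetical sketch below; example_note_submitter() and its use of q->node are illustrative assumptions.

	/* hypothetical: pin the submitting task's io_context around a lookup */
	static void example_note_submitter(struct request_queue *q)
	{
		struct io_context *ioc;

		/* allocates the context on first use; returns with a reference held */
		ioc = get_io_context(GFP_NOIO, q->node);
		if (!ioc)
			return;

		/* ... per-task state such as ioc->ioprio would be consulted here ... */

		put_io_context(ioc);	/* drops the reference; frees on last put */
	}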
diff --git a/block/blk-map.c b/block/blk-map.c new file mode 100644 index 000000000000..916cfc96ffa0 --- /dev/null +++ b/block/blk-map.c | |||
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * Functions related to mapping data to requests | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/bio.h> | ||
7 | #include <linux/blkdev.h> | ||
8 | |||
9 | #include "blk.h" | ||
10 | |||
11 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, | ||
12 | struct bio *bio) | ||
13 | { | ||
14 | if (!rq->bio) | ||
15 | blk_rq_bio_prep(q, rq, bio); | ||
16 | else if (!ll_back_merge_fn(q, rq, bio)) | ||
17 | return -EINVAL; | ||
18 | else { | ||
19 | rq->biotail->bi_next = bio; | ||
20 | rq->biotail = bio; | ||
21 | |||
22 | rq->data_len += bio->bi_size; | ||
23 | } | ||
24 | return 0; | ||
25 | } | ||
26 | EXPORT_SYMBOL(blk_rq_append_bio); | ||
27 | |||
28 | static int __blk_rq_unmap_user(struct bio *bio) | ||
29 | { | ||
30 | int ret = 0; | ||
31 | |||
32 | if (bio) { | ||
33 | if (bio_flagged(bio, BIO_USER_MAPPED)) | ||
34 | bio_unmap_user(bio); | ||
35 | else | ||
36 | ret = bio_uncopy_user(bio); | ||
37 | } | ||
38 | |||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | static int __blk_rq_map_user(struct request_queue *q, struct request *rq, | ||
43 | void __user *ubuf, unsigned int len) | ||
44 | { | ||
45 | unsigned long uaddr; | ||
46 | struct bio *bio, *orig_bio; | ||
47 | int reading, ret; | ||
48 | |||
49 | reading = rq_data_dir(rq) == READ; | ||
50 | |||
51 | /* | ||
52 | * if alignment requirement is satisfied, map in user pages for | ||
53 | * direct dma. else, set up kernel bounce buffers | ||
54 | */ | ||
55 | uaddr = (unsigned long) ubuf; | ||
56 | if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) | ||
57 | bio = bio_map_user(q, NULL, uaddr, len, reading); | ||
58 | else | ||
59 | bio = bio_copy_user(q, uaddr, len, reading); | ||
60 | |||
61 | if (IS_ERR(bio)) | ||
62 | return PTR_ERR(bio); | ||
63 | |||
64 | orig_bio = bio; | ||
65 | blk_queue_bounce(q, &bio); | ||
66 | |||
67 | /* | ||
68 | * We link the bounce buffer in and could have to traverse it | ||
69 | * later so we have to get a ref to prevent it from being freed | ||
70 | */ | ||
71 | bio_get(bio); | ||
72 | |||
73 | ret = blk_rq_append_bio(q, rq, bio); | ||
74 | if (!ret) | ||
75 | return bio->bi_size; | ||
76 | |||
77 | /* if it was bounced we must call the end io function */ | ||
78 | bio_endio(bio, 0); | ||
79 | __blk_rq_unmap_user(orig_bio); | ||
80 | bio_put(bio); | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage | ||
86 | * @q: request queue where request should be inserted | ||
87 | * @rq: request structure to fill | ||
88 | * @ubuf: the user buffer | ||
89 | * @len: length of user data | ||
90 | * | ||
91 | * Description: | ||
92 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
93 | * a kernel bounce buffer is used. | ||
94 | * | ||
95 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
96 | * still in process context. | ||
97 | * | ||
98 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
99 | * before being submitted to the device, as pages mapped may be out of | ||
100 | * reach. It is the caller's responsibility to make sure this happens. The | ||
101 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
102 | * unmapping. | ||
103 | */ | ||
104 | int blk_rq_map_user(struct request_queue *q, struct request *rq, | ||
105 | void __user *ubuf, unsigned long len) | ||
106 | { | ||
107 | unsigned long bytes_read = 0; | ||
108 | struct bio *bio = NULL; | ||
109 | int ret; | ||
110 | |||
111 | if (len > (q->max_hw_sectors << 9)) | ||
112 | return -EINVAL; | ||
113 | if (!len || !ubuf) | ||
114 | return -EINVAL; | ||
115 | |||
116 | while (bytes_read != len) { | ||
117 | unsigned long map_len, end, start; | ||
118 | |||
119 | map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE); | ||
120 | end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1) | ||
121 | >> PAGE_SHIFT; | ||
122 | start = (unsigned long)ubuf >> PAGE_SHIFT; | ||
123 | |||
124 | /* | ||
125 | * A bad offset could cause us to require BIO_MAX_PAGES + 1 | ||
126 | * pages. If this happens we just lower the requested | ||
127 | * mapping len by a page so that we can fit | ||
128 | */ | ||
129 | if (end - start > BIO_MAX_PAGES) | ||
130 | map_len -= PAGE_SIZE; | ||
131 | |||
132 | ret = __blk_rq_map_user(q, rq, ubuf, map_len); | ||
133 | if (ret < 0) | ||
134 | goto unmap_rq; | ||
135 | if (!bio) | ||
136 | bio = rq->bio; | ||
137 | bytes_read += ret; | ||
138 | ubuf += ret; | ||
139 | } | ||
140 | |||
141 | rq->buffer = rq->data = NULL; | ||
142 | return 0; | ||
143 | unmap_rq: | ||
144 | blk_rq_unmap_user(bio); | ||
145 | return ret; | ||
146 | } | ||
147 | |||
148 | EXPORT_SYMBOL(blk_rq_map_user); | ||
149 | |||
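The map/execute/unmap contract described above looks roughly like the following in a caller such as an SG_IO-style ioctl path (illustrative sketch, not part of this patch); example_pc_ioctl() is hypothetical, and a real caller would also fill rq->cmd[] before executing.

	/* hypothetical: map a user buffer, execute, then unmap the original bio */
	static int example_pc_ioctl(struct request_queue *q, struct gendisk *disk,
				    void __user *ubuf, unsigned long len)
	{
		struct request *rq;
		struct bio *bio;
		int err;

		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;
		rq->cmd_type = REQ_TYPE_BLOCK_PC;

		err = blk_rq_map_user(q, rq, ubuf, len);
		if (err)
			goto out;

		bio = rq->bio;		/* remember the original bio for unmapping */
		err = blk_execute_rq(q, disk, rq, 0);

		if (blk_rq_unmap_user(bio) && !err)
			err = -EFAULT;
	out:
		blk_put_request(rq);
		return err;
	}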
150 | /** | ||
151 | * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage | ||
152 | * @q: request queue where request should be inserted | ||
153 | * @rq: request to map data to | ||
154 | * @iov: pointer to the iovec | ||
155 | * @iov_count: number of elements in the iovec | ||
156 | * @len: I/O byte count | ||
157 | * | ||
158 | * Description: | ||
159 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
160 | * a kernel bounce buffer is used. | ||
161 | * | ||
162 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
163 | * still in process context. | ||
164 | * | ||
165 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
166 | * before being submitted to the device, as pages mapped may be out of | ||
167 | * reach. It is the caller's responsibility to make sure this happens. The | ||
168 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
169 | * unmapping. | ||
170 | */ | ||
171 | int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | ||
172 | struct sg_iovec *iov, int iov_count, unsigned int len) | ||
173 | { | ||
174 | struct bio *bio; | ||
175 | |||
176 | if (!iov || iov_count <= 0) | ||
177 | return -EINVAL; | ||
178 | |||
179 | /* we don't allow misaligned data like bio_map_user() does. If the | ||
180 | * user is using sg, they're expected to know the alignment constraints | ||
181 | * and respect them accordingly */ | ||
182 | bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ); | ||
183 | if (IS_ERR(bio)) | ||
184 | return PTR_ERR(bio); | ||
185 | |||
186 | if (bio->bi_size != len) { | ||
187 | bio_endio(bio, 0); | ||
188 | bio_unmap_user(bio); | ||
189 | return -EINVAL; | ||
190 | } | ||
191 | |||
192 | bio_get(bio); | ||
193 | blk_rq_bio_prep(q, rq, bio); | ||
194 | rq->buffer = rq->data = NULL; | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | EXPORT_SYMBOL(blk_rq_map_user_iov); | ||
199 | |||
200 | /** | ||
201 | * blk_rq_unmap_user - unmap a request with user data | ||
202 | * @bio: start of bio list | ||
203 | * | ||
204 | * Description: | ||
205 | * Unmap a rq previously mapped by blk_rq_map_user(). The caller must | ||
206 | * supply the original rq->bio from the blk_rq_map_user() return, since | ||
207 | * the io completion may have changed rq->bio. | ||
208 | */ | ||
209 | int blk_rq_unmap_user(struct bio *bio) | ||
210 | { | ||
211 | struct bio *mapped_bio; | ||
212 | int ret = 0, ret2; | ||
213 | |||
214 | while (bio) { | ||
215 | mapped_bio = bio; | ||
216 | if (unlikely(bio_flagged(bio, BIO_BOUNCED))) | ||
217 | mapped_bio = bio->bi_private; | ||
218 | |||
219 | ret2 = __blk_rq_unmap_user(mapped_bio); | ||
220 | if (ret2 && !ret) | ||
221 | ret = ret2; | ||
222 | |||
223 | mapped_bio = bio; | ||
224 | bio = bio->bi_next; | ||
225 | bio_put(mapped_bio); | ||
226 | } | ||
227 | |||
228 | return ret; | ||
229 | } | ||
230 | |||
231 | EXPORT_SYMBOL(blk_rq_unmap_user); | ||
232 | |||
233 | /** | ||
234 | * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage | ||
235 | * @q: request queue where request should be inserted | ||
236 | * @rq: request to fill | ||
237 | * @kbuf: the kernel buffer | ||
238 | * @len: length of user data | ||
239 | * @gfp_mask: memory allocation flags | ||
240 | */ | ||
241 | int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, | ||
242 | unsigned int len, gfp_t gfp_mask) | ||
243 | { | ||
244 | struct bio *bio; | ||
245 | |||
246 | if (len > (q->max_hw_sectors << 9)) | ||
247 | return -EINVAL; | ||
248 | if (!len || !kbuf) | ||
249 | return -EINVAL; | ||
250 | |||
251 | bio = bio_map_kern(q, kbuf, len, gfp_mask); | ||
252 | if (IS_ERR(bio)) | ||
253 | return PTR_ERR(bio); | ||
254 | |||
255 | if (rq_data_dir(rq) == WRITE) | ||
256 | bio->bi_rw |= (1 << BIO_RW); | ||
257 | |||
258 | blk_rq_bio_prep(q, rq, bio); | ||
259 | blk_queue_bounce(q, &rq->bio); | ||
260 | rq->buffer = rq->data = NULL; | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | EXPORT_SYMBOL(blk_rq_map_kern); | ||
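blk_rq_map_kern() follows the same pattern with a kernel buffer; the sketch below is illustrative only (not part of this patch), with example_send_internal() as a hypothetical driver helper that would still need to fill in the actual command bytes.

	/* hypothetical: attach a kernel buffer to a request and execute it */
	static int example_send_internal(struct request_queue *q,
					 struct gendisk *disk,
					 void *buf, unsigned int len)
	{
		struct request *rq;
		int err;

		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;
		rq->cmd_type = REQ_TYPE_BLOCK_PC;

		err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
		if (!err)
			err = blk_execute_rq(q, disk, rq, 0);

		blk_put_request(rq);
		return err;
	}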
diff --git a/block/blk-settings.c b/block/blk-settings.c new file mode 100644 index 000000000000..4df09a1b8f43 --- /dev/null +++ b/block/blk-settings.c | |||
@@ -0,0 +1,402 @@ | |||
1 | /* | ||
2 | * Functions related to setting various queue properties from drivers | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/bio.h> | ||
8 | #include <linux/blkdev.h> | ||
9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | ||
10 | |||
11 | #include "blk.h" | ||
12 | |||
13 | unsigned long blk_max_low_pfn, blk_max_pfn; | ||
14 | EXPORT_SYMBOL(blk_max_low_pfn); | ||
15 | EXPORT_SYMBOL(blk_max_pfn); | ||
16 | |||
17 | /** | ||
18 | * blk_queue_prep_rq - set a prepare_request function for queue | ||
19 | * @q: queue | ||
20 | * @pfn: prepare_request function | ||
21 | * | ||
22 | * It's possible for a queue to register a prepare_request callback which | ||
23 | * is invoked before the request is handed to the request_fn. The goal of | ||
24 | * the function is to prepare a request for I/O; it can be used, for | ||
25 | * instance, to build a CDB from the request data. | ||
26 | * | ||
27 | */ | ||
28 | void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) | ||
29 | { | ||
30 | q->prep_rq_fn = pfn; | ||
31 | } | ||
32 | |||
33 | EXPORT_SYMBOL(blk_queue_prep_rq); | ||
34 | |||
35 | /** | ||
36 | * blk_queue_merge_bvec - set a merge_bvec function for queue | ||
37 | * @q: queue | ||
38 | * @mbfn: merge_bvec_fn | ||
39 | * | ||
40 | * Usually queues have static limitations on the max sectors or segments that | ||
41 | * we can put in a request. Stacking drivers may have some settings that | ||
42 | * are dynamic, and thus we have to query the queue whether it is ok to | ||
43 | * add a new bio_vec to a bio at a given offset or not. If the block device | ||
44 | * has such limitations, it needs to register a merge_bvec_fn to control | ||
45 | * the size of bios sent to it. Note that a block device *must* allow a | ||
46 | * single page to be added to an empty bio. The block device driver may want | ||
47 | * to use the bio_split() function to deal with these bios. By default | ||
48 | * no merge_bvec_fn is defined for a queue, and only the fixed limits are | ||
49 | * honored. | ||
50 | */ | ||
51 | void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) | ||
52 | { | ||
53 | q->merge_bvec_fn = mbfn; | ||
54 | } | ||
55 | |||
56 | EXPORT_SYMBOL(blk_queue_merge_bvec); | ||
57 | |||
58 | void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) | ||
59 | { | ||
60 | q->softirq_done_fn = fn; | ||
61 | } | ||
62 | |||
63 | EXPORT_SYMBOL(blk_queue_softirq_done); | ||
64 | |||
65 | /** | ||
66 | * blk_queue_make_request - define an alternate make_request function for a device | ||
67 | * @q: the request queue for the device to be affected | ||
68 | * @mfn: the alternate make_request function | ||
69 | * | ||
70 | * Description: | ||
71 | * The normal way for &struct bios to be passed to a device | ||
72 | * driver is for them to be collected into requests on a request | ||
73 | * queue, and then to allow the device driver to select requests | ||
74 | * off that queue when it is ready. This works well for many block | ||
75 | * devices. However some block devices (typically virtual devices | ||
76 | * such as md or lvm) do not benefit from the processing on the | ||
77 | * request queue, and are served best by having the requests passed | ||
78 | * directly to them. This can be achieved by providing a function | ||
79 | * to blk_queue_make_request(). | ||
80 | * | ||
81 | * Caveat: | ||
82 | * The driver that does this *must* be able to deal appropriately | ||
83 | * with buffers in "highmemory". This can be accomplished by either calling | ||
84 | * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling | ||
85 | * blk_queue_bounce() to create a buffer in normal memory. | ||
86 | **/ | ||
87 | void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) | ||
88 | { | ||
89 | /* | ||
90 | * set defaults | ||
91 | */ | ||
92 | q->nr_requests = BLKDEV_MAX_RQ; | ||
93 | blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); | ||
94 | blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); | ||
95 | q->make_request_fn = mfn; | ||
96 | q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | ||
97 | q->backing_dev_info.state = 0; | ||
98 | q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; | ||
99 | blk_queue_max_sectors(q, SAFE_MAX_SECTORS); | ||
100 | blk_queue_hardsect_size(q, 512); | ||
101 | blk_queue_dma_alignment(q, 511); | ||
102 | blk_queue_congestion_threshold(q); | ||
103 | q->nr_batching = BLK_BATCH_REQ; | ||
104 | |||
105 | q->unplug_thresh = 4; /* hmm */ | ||
106 | q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */ | ||
107 | if (q->unplug_delay == 0) | ||
108 | q->unplug_delay = 1; | ||
109 | |||
110 | INIT_WORK(&q->unplug_work, blk_unplug_work); | ||
111 | |||
112 | q->unplug_timer.function = blk_unplug_timeout; | ||
113 | q->unplug_timer.data = (unsigned long)q; | ||
114 | |||
115 | /* | ||
116 | * by default assume old behaviour and bounce for any highmem page | ||
117 | */ | ||
118 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | ||
119 | } | ||
120 | |||
121 | EXPORT_SYMBOL(blk_queue_make_request); | ||
122 | |||
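A bio-based driver in the md/loop style would hook in as in the hypothetical sketch below (illustrative only, not part of this patch); example_make_request() simply completes every bio, where a real driver would remap or service it.

	/* hypothetical make_request_fn: bypasses the request queue entirely */
	static int example_make_request(struct request_queue *q, struct bio *bio)
	{
		/* a real driver would remap and resubmit, or perform the I/O */
		bio_endio(bio, 0);
		return 0;
	}

	static struct request_queue *example_alloc_queue(void)
	{
		struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

		if (q)
			blk_queue_make_request(q, example_make_request);
		return q;
	}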
123 | /** | ||
124 | * blk_queue_bounce_limit - set bounce buffer limit for queue | ||
125 | * @q: the request queue for the device | ||
126 | * @dma_addr: bus address limit | ||
127 | * | ||
128 | * Description: | ||
129 | * Different hardware can have different requirements as to what pages | ||
130 | * it can do I/O directly to. A low level driver can call | ||
131 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce | ||
132 | * buffers for doing I/O to pages residing above @dma_addr. | ||
133 | **/ | ||
134 | void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) | ||
135 | { | ||
136 | unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; | ||
137 | int dma = 0; | ||
138 | |||
139 | q->bounce_gfp = GFP_NOIO; | ||
140 | #if BITS_PER_LONG == 64 | ||
141 | /* Assume anything <= 4GB can be handled by IOMMU. | ||
142 | Actually some IOMMUs can handle everything, but I don't | ||
143 | know of a way to test this here. */ | ||
144 | if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) | ||
145 | dma = 1; | ||
146 | q->bounce_pfn = max_low_pfn; | ||
147 | #else | ||
148 | if (bounce_pfn < blk_max_low_pfn) | ||
149 | dma = 1; | ||
150 | q->bounce_pfn = bounce_pfn; | ||
151 | #endif | ||
152 | if (dma) { | ||
153 | init_emergency_isa_pool(); | ||
154 | q->bounce_gfp = GFP_NOIO | GFP_DMA; | ||
155 | q->bounce_pfn = bounce_pfn; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | EXPORT_SYMBOL(blk_queue_bounce_limit); | ||
160 | |||
161 | /** | ||
162 | * blk_queue_max_sectors - set max sectors for a request for this queue | ||
163 | * @q: the request queue for the device | ||
164 | * @max_sectors: max sectors in the usual 512b unit | ||
165 | * | ||
166 | * Description: | ||
167 | * Enables a low level driver to set an upper limit on the size of | ||
168 | * received requests. | ||
169 | **/ | ||
170 | void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) | ||
171 | { | ||
172 | if ((max_sectors << 9) < PAGE_CACHE_SIZE) { | ||
173 | max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | ||
174 | printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors); | ||
175 | } | ||
176 | |||
177 | if (BLK_DEF_MAX_SECTORS > max_sectors) | ||
178 | q->max_hw_sectors = q->max_sectors = max_sectors; | ||
179 | else { | ||
180 | q->max_sectors = BLK_DEF_MAX_SECTORS; | ||
181 | q->max_hw_sectors = max_sectors; | ||
182 | } | ||
183 | } | ||
184 | |||
185 | EXPORT_SYMBOL(blk_queue_max_sectors); | ||
186 | |||
187 | /** | ||
188 | * blk_queue_max_phys_segments - set max phys segments for a request for this queue | ||
189 | * @q: the request queue for the device | ||
190 | * @max_segments: max number of segments | ||
191 | * | ||
192 | * Description: | ||
193 | * Enables a low level driver to set an upper limit on the number of | ||
194 | * physical data segments in a request. This would be the largest sized | ||
195 | * scatter list the driver could handle. | ||
196 | **/ | ||
197 | void blk_queue_max_phys_segments(struct request_queue *q, | ||
198 | unsigned short max_segments) | ||
199 | { | ||
200 | if (!max_segments) { | ||
201 | max_segments = 1; | ||
202 | printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); | ||
203 | } | ||
204 | |||
205 | q->max_phys_segments = max_segments; | ||
206 | } | ||
207 | |||
208 | EXPORT_SYMBOL(blk_queue_max_phys_segments); | ||
209 | |||
210 | /** | ||
211 | * blk_queue_max_hw_segments - set max hw segments for a request for this queue | ||
212 | * @q: the request queue for the device | ||
213 | * @max_segments: max number of segments | ||
214 | * | ||
215 | * Description: | ||
216 | * Enables a low level driver to set an upper limit on the number of | ||
217 | * hw data segments in a request. This would be the largest number of | ||
218 | * address/length pairs the host adapter can actually give at once | ||
219 | * to the device. | ||
220 | **/ | ||
221 | void blk_queue_max_hw_segments(struct request_queue *q, | ||
222 | unsigned short max_segments) | ||
223 | { | ||
224 | if (!max_segments) { | ||
225 | max_segments = 1; | ||
226 | printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); | ||
227 | } | ||
228 | |||
229 | q->max_hw_segments = max_segments; | ||
230 | } | ||
231 | |||
232 | EXPORT_SYMBOL(blk_queue_max_hw_segments); | ||
233 | |||
234 | /** | ||
235 | * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg | ||
236 | * @q: the request queue for the device | ||
237 | * @max_size: max size of segment in bytes | ||
238 | * | ||
239 | * Description: | ||
240 | * Enables a low level driver to set an upper limit on the size of a | ||
241 | * coalesced segment | ||
242 | **/ | ||
243 | void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) | ||
244 | { | ||
245 | if (max_size < PAGE_CACHE_SIZE) { | ||
246 | max_size = PAGE_CACHE_SIZE; | ||
247 | printk("%s: set to minimum %d\n", __FUNCTION__, max_size); | ||
248 | } | ||
249 | |||
250 | q->max_segment_size = max_size; | ||
251 | } | ||
252 | |||
253 | EXPORT_SYMBOL(blk_queue_max_segment_size); | ||
254 | |||
255 | /** | ||
256 | * blk_queue_hardsect_size - set hardware sector size for the queue | ||
257 | * @q: the request queue for the device | ||
258 | * @size: the hardware sector size, in bytes | ||
259 | * | ||
260 | * Description: | ||
261 | * This should typically be set to the lowest possible sector size | ||
262 | * that the hardware can operate on (possible without resorting to | ||
263 | * even internal read-modify-write operations). Usually the default | ||
264 | * of 512 covers most hardware. | ||
265 | **/ | ||
266 | void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) | ||
267 | { | ||
268 | q->hardsect_size = size; | ||
269 | } | ||
270 | |||
271 | EXPORT_SYMBOL(blk_queue_hardsect_size); | ||
272 | |||
273 | /* | ||
274 | * Returns the minimum that is _not_ zero, unless both are zero. | ||
275 | */ | ||
276 | #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r))) | ||
277 | |||
278 | /** | ||
279 | * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers | ||
280 | * @t: the stacking driver (top) | ||
281 | * @b: the underlying device (bottom) | ||
282 | **/ | ||
283 | void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | ||
284 | { | ||
285 | /* zero is "infinity" */ | ||
286 | t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); | ||
287 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors); | ||
288 | |||
289 | t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments); | ||
290 | t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); | ||
291 | t->max_segment_size = min(t->max_segment_size,b->max_segment_size); | ||
292 | t->hardsect_size = max(t->hardsect_size,b->hardsect_size); | ||
293 | if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) | ||
294 | clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); | ||
295 | } | ||
296 | |||
297 | EXPORT_SYMBOL(blk_queue_stack_limits); | ||
298 | |||
299 | /** | ||
300 | * blk_queue_dma_drain - Set up a drain buffer for excess dma. | ||
301 | * | ||
302 | * @q: the request queue for the device | ||
303 | * @buf: physically contiguous buffer | ||
304 | * @size: size of the buffer in bytes | ||
305 | * | ||
306 | * Some devices have excess DMA problems and can't simply discard (or | ||
307 | * zero fill) the unwanted piece of the transfer. They have to have a | ||
308 | * real area of memory to transfer it into. The use case for this is | ||
309 | * ATAPI devices in DMA mode. If the packet command causes a transfer | ||
310 | * bigger than the transfer size some HBAs will lock up if there | ||
311 | * aren't DMA elements to contain the excess transfer. What this API | ||
312 | * does is adjust the queue so that the buf is always appended | ||
313 | * silently to the scatterlist. | ||
314 | * | ||
315 | * Note: This routine adjusts max_hw_segments to make room for | ||
316 | * appending the drain buffer. If you call | ||
317 | * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after | ||
318 | * calling this routine, you must set the limit to one fewer than your | ||
319 | * device can support otherwise there won't be room for the drain | ||
320 | * buffer. | ||
321 | */ | ||
322 | int blk_queue_dma_drain(struct request_queue *q, void *buf, | ||
323 | unsigned int size) | ||
324 | { | ||
325 | if (q->max_hw_segments < 2 || q->max_phys_segments < 2) | ||
326 | return -EINVAL; | ||
327 | /* make room for appending the drain */ | ||
328 | --q->max_hw_segments; | ||
329 | --q->max_phys_segments; | ||
330 | q->dma_drain_buffer = buf; | ||
331 | q->dma_drain_size = size; | ||
332 | |||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | EXPORT_SYMBOL_GPL(blk_queue_dma_drain); | ||
337 | |||
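A hypothetical ATAPI-style setup path might reserve a drain buffer as sketched below (illustrative only, not part of this patch); the 256-byte size is an arbitrary example, and the buffer must remain allocated for the queue's lifetime.

	/* hypothetical: reserve a drain buffer at queue initialisation time */
	static int example_setup_drain(struct request_queue *q)
	{
		void *drain = kmalloc(256, GFP_KERNEL);

		if (!drain)
			return -ENOMEM;

		/* the buffer is silently appended to every scatterlist for q */
		if (blk_queue_dma_drain(q, drain, 256)) {
			kfree(drain);
			return -EINVAL;
		}
		return 0;
	}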
338 | /** | ||
339 | * blk_queue_segment_boundary - set boundary rules for segment merging | ||
340 | * @q: the request queue for the device | ||
341 | * @mask: the memory boundary mask | ||
342 | **/ | ||
343 | void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) | ||
344 | { | ||
345 | if (mask < PAGE_CACHE_SIZE - 1) { | ||
346 | mask = PAGE_CACHE_SIZE - 1; | ||
347 | printk("%s: set to minimum %lx\n", __FUNCTION__, mask); | ||
348 | } | ||
349 | |||
350 | q->seg_boundary_mask = mask; | ||
351 | } | ||
352 | |||
353 | EXPORT_SYMBOL(blk_queue_segment_boundary); | ||
354 | |||
355 | /** | ||
356 | * blk_queue_dma_alignment - set dma length and memory alignment | ||
357 | * @q: the request queue for the device | ||
358 | * @mask: alignment mask | ||
359 | * | ||
360 | * Description: | ||
361 | * Set the required memory and length alignment for direct DMA transactions. | ||
362 | * This is used when building direct I/O requests for the queue. | ||
363 | * | ||
364 | **/ | ||
365 | void blk_queue_dma_alignment(struct request_queue *q, int mask) | ||
366 | { | ||
367 | q->dma_alignment = mask; | ||
368 | } | ||
369 | |||
370 | EXPORT_SYMBOL(blk_queue_dma_alignment); | ||
371 | |||
372 | /** | ||
373 | * blk_queue_update_dma_alignment - update dma length and memory alignment | ||
374 | * @q: the request queue for the device | ||
375 | * @mask: alignment mask | ||
376 | * | ||
377 | * Description: | ||
378 | * Update the required memory and length alignment for direct DMA transactions. | ||
379 | * If the requested alignment is larger than the current alignment, then | ||
380 | * the current queue alignment is updated to the new value, otherwise it | ||
381 | * is left alone. The design of this is to allow multiple objects | ||
382 | * (driver, device, transport etc) to set their respective | ||
383 | * alignments without having them interfere. | ||
384 | * | ||
385 | **/ | ||
386 | void blk_queue_update_dma_alignment(struct request_queue *q, int mask) | ||
387 | { | ||
388 | BUG_ON(mask > PAGE_SIZE); | ||
389 | |||
390 | if (mask > q->dma_alignment) | ||
391 | q->dma_alignment = mask; | ||
392 | } | ||
393 | |||
394 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); | ||
395 | |||
396 | int __init blk_settings_init(void) | ||
397 | { | ||
398 | blk_max_low_pfn = max_low_pfn - 1; | ||
399 | blk_max_pfn = max_pfn - 1; | ||
400 | return 0; | ||
401 | } | ||
402 | subsys_initcall(blk_settings_init); | ||
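Taken together, a request-based driver's probe path would typically apply these limits as in the sketch below (illustrative only, not part of this patch); the specific values shown are hypothetical examples, not recommendations.

	/* hypothetical queue configuration for a driver with a 32-bit DMA engine */
	static void example_configure_queue(struct request_queue *q)
	{
		blk_queue_bounce_limit(q, 0xffffffffULL);	/* bounce pages above 4GB */
		blk_queue_max_sectors(q, 255);
		blk_queue_max_phys_segments(q, 64);
		blk_queue_max_hw_segments(q, 64);
		blk_queue_max_segment_size(q, 65536);
		blk_queue_segment_boundary(q, 0xffffffff);
		blk_queue_hardsect_size(q, 512);
		blk_queue_dma_alignment(q, 511);
	}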
diff --git a/block/blk.h b/block/blk.h index d88549df1b09..083394007199 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -1,11 +1,28 @@ | |||
1 | #ifndef BLK_INTERNAL_H | 1 | #ifndef BLK_INTERNAL_H |
2 | #define BLK_INTERNAL_H | 2 | #define BLK_INTERNAL_H |
3 | 3 | ||
4 | /* Amount of time in which a process may batch requests */ | ||
5 | #define BLK_BATCH_TIME (HZ/50UL) | ||
6 | |||
7 | /* Number of requests a "batching" process may submit */ | ||
8 | #define BLK_BATCH_REQ 32 | ||
9 | |||
4 | extern struct kmem_cache *blk_requestq_cachep; | 10 | extern struct kmem_cache *blk_requestq_cachep; |
5 | extern struct kobj_type blk_queue_ktype; | 11 | extern struct kobj_type blk_queue_ktype; |
6 | 12 | ||
13 | void rq_init(struct request_queue *q, struct request *rq); | ||
14 | void init_request_from_bio(struct request *req, struct bio *bio); | ||
15 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | ||
16 | struct bio *bio); | ||
17 | int ll_back_merge_fn(struct request_queue *q, struct request *req, | ||
18 | struct bio *bio); | ||
7 | void __blk_queue_free_tags(struct request_queue *q); | 19 | void __blk_queue_free_tags(struct request_queue *q); |
8 | 20 | ||
21 | void blk_unplug_work(struct work_struct *work); | ||
22 | void blk_unplug_timeout(unsigned long data); | ||
23 | |||
24 | struct io_context *current_io_context(gfp_t gfp_flags, int node); | ||
25 | |||
9 | void blk_queue_congestion_threshold(struct request_queue *q); | 26 | void blk_queue_congestion_threshold(struct request_queue *q); |
10 | 27 | ||
11 | /* | 28 | /* |