Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--  block/blk-merge.c | 71
1 file changed, 22 insertions(+), 49 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 23d2a6fe34a3..39ce64432ba6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,35 +9,6 @@
 
 #include "blk.h"
 
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
-{
-	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
-		rq->hard_sector += nsect;
-		rq->hard_nr_sectors -= nsect;
-
-		/*
-		 * Move the I/O submission pointers ahead if required.
-		 */
-		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
-		    (rq->sector <= rq->hard_sector)) {
-			rq->sector = rq->hard_sector;
-			rq->nr_sectors = rq->hard_nr_sectors;
-			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
-			rq->current_nr_sectors = rq->hard_cur_sectors;
-			rq->buffer = bio_data(rq->bio);
-		}
-
-		/*
-		 * if total number of sectors is less than the first segment
-		 * size, something has gone terribly wrong
-		 */
-		if (rq->nr_sectors < rq->current_nr_sectors) {
-			printk(KERN_ERR "blk: request botched\n");
-			rq->nr_sectors = rq->current_nr_sectors;
-		}
-	}
-}
-
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
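The helper deleted above existed only to keep rq->sector/rq->nr_sectors in step with their rq->hard_* shadow copies after partial completions. The rest of this diff moves callers onto the blk_rq_pos()/blk_rq_sectors()/blk_rq_bytes() accessors, which derive everything from one offset and one byte count, so there is no duplicate state left to resynchronize. A minimal sketch of the accessor shape, assuming blk_rq_sectors() is derived from the __data_len field that the final hunk updates (the exact inline bodies are an assumption, not part of this diff):

	/* Sketch only: assumed accessor bodies; __sector and __data_len
	 * follow the request fields visible elsewhere in this diff. */
	static inline sector_t blk_rq_pos(const struct request *rq)
	{
		return rq->__sector;		/* start sector */
	}

	static inline unsigned int blk_rq_bytes(const struct request *rq)
	{
		return rq->__data_len;		/* bytes remaining */
	}

	static inline unsigned int blk_rq_sectors(const struct request *rq)
	{
		return blk_rq_bytes(rq) >> 9;	/* derived, 512 B sectors */
	}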
@@ -61,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		 * never considered part of another segment, since that
 		 * might change with the bounce page.
 		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+		high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
 		if (high || highprv)
 			goto new_segment;
 		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
+			if (seg_size + bv->bv_len
+			    > queue_max_segment_size(q))
 				goto new_segment;
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
 				goto new_segment;
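This hunk starts the conversion from open-coded q->max_* reads to queue_*() accessors; the diff only shows the call sites. A plausible shape for the accessor itself, assuming the limits were gathered into an embedded queue_limits structure as in the queue-topology work of the same era (the wrapper body is an assumption):

	/* Assumed wrapper body; only the call-site conversion appears
	 * in the hunk above. */
	static inline unsigned int queue_max_segment_size(struct request_queue *q)
	{
		return q->limits.max_segment_size;
	}

Funneling every limit read through one helper gives a single place to apply stacked-device limit logic later without touching call sites again.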
@@ -120,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-	    q->max_segment_size)
+	    queue_max_segment_size(q))
 		return 0;
 
 	if (!bio_has_data(bio))
@@ -163,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			int nbytes = bvec->bv_len;
 
 			if (bvprv && cluster) {
-				if (sg->length + nbytes > q->max_segment_size)
+				if (sg->length + nbytes > queue_max_segment_size(q))
 					goto new_segment;
 
 				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -199,8 +171,9 @@ new_segment:
 
 
 	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
-	    (rq->data_len & q->dma_pad_mask)) {
-		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+		unsigned int pad_len =
+			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
 
 		sg->length += pad_len;
 		rq->extra_len += pad_len;
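The padding expression rounds a transfer length up to the DMA alignment: with dma_pad_mask = alignment - 1, (mask & ~len) + 1 is exactly the distance from len to the next multiple of the alignment, and the (blk_rq_bytes(rq) & q->dma_pad_mask) guard ensures it only runs for unaligned lengths. A standalone check of the arithmetic with hypothetical values:

	#include <assert.h>

	/* Same expression as the hunk above, outside the kernel. */
	static unsigned int pad_len(unsigned int mask, unsigned int len)
	{
		return (mask & ~len) + 1;
	}

	int main(void)
	{
		/* mask 3 => 4-byte alignment: 517 pads by 3 to reach 520 */
		assert(pad_len(3, 517) == 3);
		/* 518 pads by 2, 519 by 1 */
		assert(pad_len(3, 518) == 2);
		assert(pad_len(3, 519) == 1);
		return 0;
	}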
@@ -233,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -255,11 +228,11 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
-	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -279,12 +252,12 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 
-	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
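ll_back_merge_fn() and ll_front_merge_fn() apply the same size gate from either end of the request: BLOCK_PC passthrough requests are allowed up to the hardware limit, filesystem requests stay under the softer max_sectors cap, and an oversized candidate is branded REQ_NOMERGE so the elevator stops offering it. A sketch of the shared check with hypothetical numbers:

	/* Hypothetical values: with a 1024-sector cap, a 1016-sector
	 * request plus a 16-sector bio totals 1032, so the merge is
	 * refused. */
	static int merge_fits(unsigned int req_sectors,
			      unsigned int bio_sectors,
			      unsigned int max_sectors)
	{
		return req_sectors + bio_sectors <= max_sectors;
	}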
@@ -315,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -327,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > q->max_phys_segments)
+	if (total_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
-	if (total_phys_segments > q->max_hw_segments)
+	if (total_phys_segments > queue_max_hw_segments(q))
 		return 0;
 
 	/* Merge is OK... */
@@ -345,7 +318,7 @@ static void blk_account_io_merge(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part);
@@ -366,7 +339,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	/*
 	 * not contiguous
 	 */
-	if (req->sector + req->nr_sectors != next->sector)
+	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
 		return 0;
 
 	if (rq_data_dir(req) != rq_data_dir(next)
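With the accessors in place, the contiguity test reads as plain interval arithmetic: req may absorb next only if req ends exactly where next begins. A hypothetical standalone version of the adjacency test:

	/* sector_t is typedef'd here only for illustration. */
	typedef unsigned long long sector_t;

	static int contiguous(sector_t req_pos, unsigned int req_sectors,
			      sector_t next_pos)
	{
		return req_pos + req_sectors == next_pos;
	}

For example, contiguous(2048, 8, 2056) holds, while contiguous(2048, 8, 2060) leaves a 4-sector gap and fails.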
@@ -398,7 +371,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	req->biotail->bi_next = next->bio;
 	req->biotail = next->biotail;
 
-	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+	req->__data_len += blk_rq_bytes(next);
 
 	elv_merge_requests(q, req, next);
 
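This last hunk is the payoff of dropping the hard_* fields from the first hunk. Side by side:

	/* before: two sector counters kept in sync by hand */
	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;

	/* after: one byte counter; the sector count is derived from it */
	req->__data_len += blk_rq_bytes(next);

Assuming the >> 9 derivation sketched earlier, blk_rq_sectors() stays correct automatically, and the two views of the request length can no longer drift apart.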