diff options
author | Martin K. Petersen <martin.petersen@oracle.com> | 2012-09-18 12:19:26 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2012-09-20 08:31:41 -0400 |
commit | f31dc1cd490539e2b62a126bc4dc2495b165d772 (patch) | |
tree | e8a917161111266f72a7aea539c9562dc2f2653d | |
parent | e2a60da74fc8215c68509a89e9a69c66363153db (diff) |
block: Consolidate command flag and queue limit checks for merges
- blk_check_merge_flags() verifies that cmd_flags / bi_rw are
compatible. This function is called for both req-req and req-bio
merging.
- blk_rq_get_max_sectors() and blk_queue_get_max_sectors() can be used
to query the maximum sector count for a given request or queue. The
calls will return the right value from the queue limits given the
type of command (RW, discard, write same, etc.)
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | block/blk-core.c | 3 | ||||
-rw-r--r-- | block/blk-merge.c | 30 | ||||
-rw-r--r-- | include/linux/blkdev.h | 31 |
3 files changed, 44 insertions(+), 20 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c index 5cc29299f6ac..33eded00c5b1 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -1866,8 +1866,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq) | |||
1866 | if (!rq_mergeable(rq)) | 1866 | if (!rq_mergeable(rq)) |
1867 | return 0; | 1867 | return 0; |
1868 | 1868 | ||
1869 | if (blk_rq_sectors(rq) > queue_max_sectors(q) || | 1869 | if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { |
1870 | blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { | ||
1871 | printk(KERN_ERR "%s: over max size limit.\n", __func__); | 1870 | printk(KERN_ERR "%s: over max size limit.\n", __func__); |
1872 | return -EIO; | 1871 | return -EIO; |
1873 | } | 1872 | } |
diff --git a/block/blk-merge.c b/block/blk-merge.c index 86710ca408b8..642b862608a1 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -275,14 +275,8 @@ no_merge: | |||
275 | int ll_back_merge_fn(struct request_queue *q, struct request *req, | 275 | int ll_back_merge_fn(struct request_queue *q, struct request *req, |
276 | struct bio *bio) | 276 | struct bio *bio) |
277 | { | 277 | { |
278 | unsigned short max_sectors; | 278 | if (blk_rq_sectors(req) + bio_sectors(bio) > |
279 | 279 | blk_rq_get_max_sectors(req)) { | |
280 | if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) | ||
281 | max_sectors = queue_max_hw_sectors(q); | ||
282 | else | ||
283 | max_sectors = queue_max_sectors(q); | ||
284 | |||
285 | if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { | ||
286 | req->cmd_flags |= REQ_NOMERGE; | 280 | req->cmd_flags |= REQ_NOMERGE; |
287 | if (req == q->last_merge) | 281 | if (req == q->last_merge) |
288 | q->last_merge = NULL; | 282 | q->last_merge = NULL; |
@@ -299,15 +293,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, | |||
299 | int ll_front_merge_fn(struct request_queue *q, struct request *req, | 293 | int ll_front_merge_fn(struct request_queue *q, struct request *req, |
300 | struct bio *bio) | 294 | struct bio *bio) |
301 | { | 295 | { |
302 | unsigned short max_sectors; | 296 | if (blk_rq_sectors(req) + bio_sectors(bio) > |
303 | 297 | blk_rq_get_max_sectors(req)) { | |
304 | if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) | ||
305 | max_sectors = queue_max_hw_sectors(q); | ||
306 | else | ||
307 | max_sectors = queue_max_sectors(q); | ||
308 | |||
309 | |||
310 | if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { | ||
311 | req->cmd_flags |= REQ_NOMERGE; | 298 | req->cmd_flags |= REQ_NOMERGE; |
312 | if (req == q->last_merge) | 299 | if (req == q->last_merge) |
313 | q->last_merge = NULL; | 300 | q->last_merge = NULL; |
@@ -338,7 +325,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, | |||
338 | /* | 325 | /* |
339 | * Will it become too large? | 326 | * Will it become too large? |
340 | */ | 327 | */ |
341 | if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q)) | 328 | if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > |
329 | blk_rq_get_max_sectors(req)) | ||
342 | return 0; | 330 | return 0; |
343 | 331 | ||
344 | total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; | 332 | total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; |
@@ -417,6 +405,9 @@ static int attempt_merge(struct request_queue *q, struct request *req, | |||
417 | if (!rq_mergeable(req) || !rq_mergeable(next)) | 405 | if (!rq_mergeable(req) || !rq_mergeable(next)) |
418 | return 0; | 406 | return 0; |
419 | 407 | ||
408 | if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags)) | ||
409 | return 0; | ||
410 | |||
420 | /* | 411 | /* |
421 | * not contiguous | 412 | * not contiguous |
422 | */ | 413 | */ |
@@ -512,6 +503,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) | |||
512 | if (!rq_mergeable(rq) || !bio_mergeable(bio)) | 503 | if (!rq_mergeable(rq) || !bio_mergeable(bio)) |
513 | return false; | 504 | return false; |
514 | 505 | ||
506 | if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw)) | ||
507 | return false; | ||
508 | |||
515 | /* different data direction or already started, don't merge */ | 509 | /* different data direction or already started, don't merge */ |
516 | if (bio_data_dir(bio) != rq_data_dir(rq)) | 510 | if (bio_data_dir(bio) != rq_data_dir(rq)) |
517 | return false; | 511 | return false; |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3a6fea7460f1..90f7abe8f183 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -605,6 +605,18 @@ static inline bool rq_mergeable(struct request *rq) | |||
605 | return true; | 605 | return true; |
606 | } | 606 | } |
607 | 607 | ||
608 | static inline bool blk_check_merge_flags(unsigned int flags1, | ||
609 | unsigned int flags2) | ||
610 | { | ||
611 | if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD)) | ||
612 | return false; | ||
613 | |||
614 | if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE)) | ||
615 | return false; | ||
616 | |||
617 | return true; | ||
618 | } | ||
619 | |||
608 | /* | 620 | /* |
609 | * q->prep_rq_fn return values | 621 | * q->prep_rq_fn return values |
610 | */ | 622 | */ |
@@ -800,6 +812,25 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | |||
800 | return blk_rq_cur_bytes(rq) >> 9; | 812 | return blk_rq_cur_bytes(rq) >> 9; |
801 | } | 813 | } |
802 | 814 | ||
815 | static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, | ||
816 | unsigned int cmd_flags) | ||
817 | { | ||
818 | if (unlikely(cmd_flags & REQ_DISCARD)) | ||
819 | return q->limits.max_discard_sectors; | ||
820 | |||
821 | return q->limits.max_sectors; | ||
822 | } | ||
823 | |||
824 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq) | ||
825 | { | ||
826 | struct request_queue *q = rq->q; | ||
827 | |||
828 | if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) | ||
829 | return q->limits.max_hw_sectors; | ||
830 | |||
831 | return blk_queue_get_max_sectors(q, rq->cmd_flags); | ||
832 | } | ||
833 | |||
803 | /* | 834 | /* |
804 | * Request issue related functions. | 835 | * Request issue related functions. |
805 | */ | 836 | */ |