author    David Woodhouse <David.Woodhouse@intel.com>    2008-08-09 11:42:20 -0400
committer Jens Axboe <jens.axboe@oracle.com>    2008-10-09 02:56:02 -0400
commit    e17fc0a1ccf88f6d4dcb363729f3141b0958c325 (patch)
tree      0a7c2dc1c3159c2af14d87c67ca83e158b2c78b5 /block/blk-merge.c
parent    d30a2605be9d5132d95944916e8f578fcfe4f976 (diff)
Allow elevators to sort/merge discard requests
But blkdev_issue_discard() still emits requests which are interpreted as soft barriers, because naïve callers might otherwise issue subsequent writes to those same sectors, which might cross on the queue (if they're reallocated quickly enough). Callers still _can_ issue non-barrier discard requests, but they have to take care of queue ordering for themselves.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
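As a rough illustration of that last point, a caller opting out of the implied barrier has to order its own later writes against the discard. The sketch below is not part of this patch: it assumes the bio API of this kernel era and a DISCARD_NOBARRIER request flag from the same patch series (neither is shown on this page), and the example_* names are made up purely for illustration.

/*
 * Hypothetical caller: issue a non-barrier discard and take care of
 * ordering itself, by waiting for completion before writing to the
 * same sectors.  Sketch only; error handling is mostly omitted.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/fs.h>

static void example_discard_end_io(struct bio *bio, int err)
{
        /* Wake the submitter once the device has completed the discard. */
        complete(bio->bi_private);
        bio_put(bio);
}

static int example_discard_then_write(struct block_device *bdev,
                                      sector_t sector, unsigned int nr_sects)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio)
                return -ENOMEM;

        /* A discard bio carries no data pages; only the sector range matters. */
        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio->bi_size = nr_sects << 9;
        bio->bi_end_io = example_discard_end_io;
        bio->bi_private = &done;

        /* No barrier implied: the elevator is free to sort/merge this request. */
        submit_bio(DISCARD_NOBARRIER, bio);

        /*
         * Because no ordering is enforced by the queue, wait for the discard
         * to finish before submitting writes that reuse these sectors.
         */
        wait_for_completion(&done);

        /* ... safe to queue writes covering [sector, sector + nr_sects) now ... */
        return 0;
}

With the barrier form (the blkdev_issue_discard() default described above), the queue provides that ordering instead, which is what protects naïve callers.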
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--   block/blk-merge.c   27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5efc9e7a68b7..6cf8f0c70a51 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,7 +11,7 @@
 
 void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
-        if (blk_fs_request(rq)) {
+        if (blk_fs_request(rq) || blk_discard_rq(rq)) {
                 rq->hard_sector += nsect;
                 rq->hard_nr_sectors -= nsect;
 
@@ -131,13 +131,17 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
         if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                 return 0;
 
-        if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
-                return 0;
         if (bio->bi_size + nxt->bi_size > q->max_segment_size)
                 return 0;
 
+        if (!bio_has_data(bio))
+                return 1;
+
+        if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+                return 0;
+
         /*
          * bio and nxt are contigous in memory, check if the queue allows
          * these two to be merged into one
          */
         if (BIO_SEG_BOUNDARY(q, bio, nxt))
@@ -153,8 +157,9 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
                 blk_recount_segments(q, bio);
         if (!bio_flagged(nxt, BIO_SEG_VALID))
                 blk_recount_segments(q, nxt);
-        if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-            BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
+        if (bio_has_data(bio) &&
+            (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
+             BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size)))
                 return 0;
         if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
                 return 0;
@@ -317,8 +322,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
         if (!bio_flagged(bio, BIO_SEG_VALID))
                 blk_recount_segments(q, bio);
         len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-        if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
-            && !BIOVEC_VIRT_OVERSIZE(len)) {
+        if (!bio_has_data(bio) ||
+            (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+             && !BIOVEC_VIRT_OVERSIZE(len))) {
                 int mergeable = ll_new_mergeable(q, req, bio);
 
                 if (mergeable) {
@@ -356,8 +362,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                 blk_recount_segments(q, bio);
         if (!bio_flagged(req->bio, BIO_SEG_VALID))
                 blk_recount_segments(q, req->bio);
-        if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-            !BIOVEC_VIRT_OVERSIZE(len)) {
+        if (!bio_has_data(bio) ||
+            (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
+             !BIOVEC_VIRT_OVERSIZE(len))) {
                 int mergeable = ll_new_mergeable(q, req, bio);
 
                 if (mergeable) {
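A note on why the new checks are safe: a discard bio is allocated without any bio_vec pages, so there is no physical or virtual segment geometry to test, and the mergeability macros above would otherwise inspect bio vectors that simply are not there; the queue's segment-size limits still apply. The checks hinge on bio_has_data(), which is defined in include/linux/bio.h and is not part of this diff; roughly, such a predicate might look like the following (a sketch, not necessarily the exact kernel definition):

static inline int bio_has_data(struct bio *bio)
{
        /* A bio with no io_vec (e.g. a discard) carries no data to merge. */
        return bio && bio->bi_io_vec != NULL;
}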