author		Jens Axboe <jens.axboe@oracle.com>	2008-05-07 03:33:55 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-05-07 03:33:55 -0400
commit		2cdf79cafbd11580f5b63cd4993b45c1c4952415 (patch)
tree		c0f4b7eea00127675785174a79ff0ac4c6d2669a /block
parent		7f3d4ee108c184ab215036051087aaaaa8de7661 (diff)
block: get rid of likely/unlikely predictions in merge logic
They tend to depend a lot on the workload, so they are not a
clear-cut likely or unlikely fit.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
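For context: likely() and unlikely() are thin wrappers around GCC's
__builtin_expect(), which tells the compiler to lay out code so that the
predicted side of the branch becomes the straight-line (fall-through) path.
A minimal sketch of the definitions, as found in include/linux/compiler.h:

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

The hint only pays off when it matches the branch's real behaviour. A
workload that routinely submits bios without a valid segment count makes the
"unlikely" recount path the common one, so the annotation pessimizes exactly
the code being executed; dropping it falls back to the compiler's own
heuristics.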
Diffstat (limited to 'block')
-rw-r--r--	block/blk-merge.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 73b23562af2..651136aae76 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -149,9 +149,9 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
				 struct bio *nxt)
 {
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+	if (!bio_flagged(nxt, BIO_SEG_VALID))
 		blk_recount_segments(q, nxt);
 	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
 	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
@@ -312,9 +312,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 			q->last_merge = NULL;
 		return 0;
 	}
-	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
 		blk_recount_segments(q, req->biotail);
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
@@ -352,9 +352,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	}
 	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+	if (!bio_flagged(req->bio, BIO_SEG_VALID))
 		blk_recount_segments(q, req->bio);
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
 	    !BIOVEC_VIRT_OVERSIZE(len)) {
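The effect of the removed hints can be observed outside the kernel. Below is
a hypothetical standalone sketch (the file name, recount(), and check() are
illustrative, not from this patch) that mirrors the pattern in the diff;
comparing gcc -O2 -S output with and without the unlikely() wrapper shows
the hinted branch body moved off the straight-line path:

	/* expect_demo.c -- hypothetical sketch, not kernel code */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	int recount(int *flags);	/* stand-in for blk_recount_segments() */

	int check(int *flags)
	{
		/* With the hint, gcc places the recount call out of the
		 * fall-through path; without it, block layout follows the
		 * compiler's default heuristics. */
		if (unlikely(!(*flags & 1)))
			return recount(flags);
		return 0;
	}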