diff options
author | Ming Lei <ming.lei@canonical.com> | 2014-09-02 11:02:59 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2014-09-02 12:25:12 -0400 |
commit | 0738854939e6ec9b9111a8cfc0ca1dfa3cff6b2e (patch) | |
tree | 617923e115af85896590d7ff7ba96baf16f4399f /block | |
parent | 7505ceaf863590b24a4c0c83b64817d26e0d51e3 (diff) |
blk-merge: fix blk_recount_segments
QUEUE_FLAG_NO_SG_MERGE is set at default for blk-mq devices,
so the computed bio->bi_phys_segments may be bigger than
queue_max_segments(q) for blk-mq devices, then drivers will
fail to handle the case, for example, BUG_ON() in
virtio_queue_rq() can be triggered for virtio-blk:
https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1359146
This patch fixes the issue by ignoring the QUEUE_FLAG_NO_SG_MERGE
flag if the computed bio->bi_phys_segments is bigger than
queue_max_segments(q). The regression was caused by commit
05f1dd53152173 (block: add queue flag for disabling SG merging).
Reported-by: Kick In <pierre-andre.morey@canonical.com>
Tested-by: Chris J Arges <chris.j.arges@canonical.com>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-merge.c | 17 |
1 file changed, 11 insertions, 6 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c index 54535831f1e1..77881798f793 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -10,10 +10,11 @@ | |||
10 | #include "blk.h" | 10 | #include "blk.h" |
11 | 11 | ||
12 | static unsigned int __blk_recalc_rq_segments(struct request_queue *q, | 12 | static unsigned int __blk_recalc_rq_segments(struct request_queue *q, |
13 | struct bio *bio) | 13 | struct bio *bio, |
14 | bool no_sg_merge) | ||
14 | { | 15 | { |
15 | struct bio_vec bv, bvprv = { NULL }; | 16 | struct bio_vec bv, bvprv = { NULL }; |
16 | int cluster, high, highprv = 1, no_sg_merge; | 17 | int cluster, high, highprv = 1; |
17 | unsigned int seg_size, nr_phys_segs; | 18 | unsigned int seg_size, nr_phys_segs; |
18 | struct bio *fbio, *bbio; | 19 | struct bio *fbio, *bbio; |
19 | struct bvec_iter iter; | 20 | struct bvec_iter iter; |
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, | |||
35 | cluster = blk_queue_cluster(q); | 36 | cluster = blk_queue_cluster(q); |
36 | seg_size = 0; | 37 | seg_size = 0; |
37 | nr_phys_segs = 0; | 38 | nr_phys_segs = 0; |
38 | no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); | ||
39 | high = 0; | 39 | high = 0; |
40 | for_each_bio(bio) { | 40 | for_each_bio(bio) { |
41 | bio_for_each_segment(bv, bio, iter) { | 41 | bio_for_each_segment(bv, bio, iter) { |
@@ -88,18 +88,23 @@ new_segment: | |||
88 | 88 | ||
89 | void blk_recalc_rq_segments(struct request *rq) | 89 | void blk_recalc_rq_segments(struct request *rq) |
90 | { | 90 | { |
91 | rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio); | 91 | bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE, |
92 | &rq->q->queue_flags); | ||
93 | |||
94 | rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, | ||
95 | no_sg_merge); | ||
92 | } | 96 | } |
93 | 97 | ||
94 | void blk_recount_segments(struct request_queue *q, struct bio *bio) | 98 | void blk_recount_segments(struct request_queue *q, struct bio *bio) |
95 | { | 99 | { |
96 | if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags)) | 100 | if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && |
101 | bio->bi_vcnt < queue_max_segments(q)) | ||
97 | bio->bi_phys_segments = bio->bi_vcnt; | 102 | bio->bi_phys_segments = bio->bi_vcnt; |
98 | else { | 103 | else { |
99 | struct bio *nxt = bio->bi_next; | 104 | struct bio *nxt = bio->bi_next; |
100 | 105 | ||
101 | bio->bi_next = NULL; | 106 | bio->bi_next = NULL; |
102 | bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); | 107 | bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); |
103 | bio->bi_next = nxt; | 108 | bio->bi_next = nxt; |
104 | } | 109 | } |
105 | 110 | ||