diff options
| author | Ming Lei <ming.lei@redhat.com> | 2017-12-18 02:40:43 -0500 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2017-12-18 15:55:43 -0500 |
| commit | 14cb0dc6479dc5ebc63b3a459a5d89a2f1b39fed (patch) | |
| tree | db336ec3ba6267370615b5bc60c104f03f924bef | |
| parent | a9fa99146ab4fc029ba5551a1a3a0102fae7fddf (diff) | |
block: don't let passthrough IO go into .make_request_fn()
Commit a8821f3f3("block: Improvements to bounce-buffer handling") tries
to make sure that the bio to .make_request_fn won't exceed BIO_MAX_PAGES,
but ignores that passthrough I/O can use blk_queue_bounce() too.
Especially, passthrough IO may not be sector-aligned, and the check
of 'sectors < bio_sectors(*bio_orig)' inside __blk_queue_bounce() may
become true even though the max bvec number doesn't exceed BIO_MAX_PAGES,
then cause the bio to be split, and the original passthrough bio is submitted
to generic_make_request().
This patch fixes this issue by checking if the bio is passthrough IO,
and using bio_kmalloc() to allocate the cloned passthrough bio.
Cc: NeilBrown <neilb@suse.com>
Fixes: a8821f3f3("block: Improvements to bounce-buffer handling")
Tested-by: Michele Ballabio <barra_cuda@katamail.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| -rw-r--r-- | block/bounce.c | 6 | ||||
| -rw-r--r-- | include/linux/blkdev.h | 21 |
2 files changed, 23 insertions, 4 deletions
diff --git a/block/bounce.c b/block/bounce.c index fceb1a96480b..1d05c422c932 100644 --- a/block/bounce.c +++ b/block/bounce.c | |||
| @@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, | |||
| 200 | unsigned i = 0; | 200 | unsigned i = 0; |
| 201 | bool bounce = false; | 201 | bool bounce = false; |
| 202 | int sectors = 0; | 202 | int sectors = 0; |
| 203 | bool passthrough = bio_is_passthrough(*bio_orig); | ||
| 203 | 204 | ||
| 204 | bio_for_each_segment(from, *bio_orig, iter) { | 205 | bio_for_each_segment(from, *bio_orig, iter) { |
| 205 | if (i++ < BIO_MAX_PAGES) | 206 | if (i++ < BIO_MAX_PAGES) |
| @@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, | |||
| 210 | if (!bounce) | 211 | if (!bounce) |
| 211 | return; | 212 | return; |
| 212 | 213 | ||
| 213 | if (sectors < bio_sectors(*bio_orig)) { | 214 | if (!passthrough && sectors < bio_sectors(*bio_orig)) { |
| 214 | bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split); | 215 | bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split); |
| 215 | bio_chain(bio, *bio_orig); | 216 | bio_chain(bio, *bio_orig); |
| 216 | generic_make_request(*bio_orig); | 217 | generic_make_request(*bio_orig); |
| 217 | *bio_orig = bio; | 218 | *bio_orig = bio; |
| 218 | } | 219 | } |
| 219 | bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set); | 220 | bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL : |
| 221 | bounce_bio_set); | ||
| 220 | 222 | ||
| 221 | bio_for_each_segment_all(to, bio, i) { | 223 | bio_for_each_segment_all(to, bio, i) { |
| 222 | struct page *page = to->bv_page; | 224 | struct page *page = to->bv_page; |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8089ca17db9a..abd06f540863 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -241,14 +241,24 @@ struct request { | |||
| 241 | struct request *next_rq; | 241 | struct request *next_rq; |
| 242 | }; | 242 | }; |
| 243 | 243 | ||
| 244 | static inline bool blk_op_is_scsi(unsigned int op) | ||
| 245 | { | ||
| 246 | return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT; | ||
| 247 | } | ||
| 248 | |||
| 249 | static inline bool blk_op_is_private(unsigned int op) | ||
| 250 | { | ||
| 251 | return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; | ||
| 252 | } | ||
| 253 | |||
| 244 | static inline bool blk_rq_is_scsi(struct request *rq) | 254 | static inline bool blk_rq_is_scsi(struct request *rq) |
| 245 | { | 255 | { |
| 246 | return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; | 256 | return blk_op_is_scsi(req_op(rq)); |
| 247 | } | 257 | } |
| 248 | 258 | ||
| 249 | static inline bool blk_rq_is_private(struct request *rq) | 259 | static inline bool blk_rq_is_private(struct request *rq) |
| 250 | { | 260 | { |
| 251 | return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; | 261 | return blk_op_is_private(req_op(rq)); |
| 252 | } | 262 | } |
| 253 | 263 | ||
| 254 | static inline bool blk_rq_is_passthrough(struct request *rq) | 264 | static inline bool blk_rq_is_passthrough(struct request *rq) |
| @@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq) | |||
| 256 | return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); | 266 | return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); |
| 257 | } | 267 | } |
| 258 | 268 | ||
| 269 | static inline bool bio_is_passthrough(struct bio *bio) | ||
| 270 | { | ||
| 271 | unsigned op = bio_op(bio); | ||
| 272 | |||
| 273 | return blk_op_is_scsi(op) || blk_op_is_private(op); | ||
| 274 | } | ||
| 275 | |||
| 259 | static inline unsigned short req_get_ioprio(struct request *req) | 276 | static inline unsigned short req_get_ioprio(struct request *req) |
| 260 | { | 277 | { |
| 261 | return req->ioprio; | 278 | return req->ioprio; |
