author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2008-08-25 14:36:08 -0400
committer  Jens Axboe <jens.axboe@oracle.com>                2008-08-27 03:50:19 -0400
commit     aefcc28a3a63ac33a298777aa50ba43641c75241
tree       d72a23323e80b45da16ea916c9abe03d6a3f6bfd /fs/bio.c
parent     76029ff37f31dad64641489c610d98955217bb68
bio: fix __bio_copy_iov() handling of bio->bv_len
Commit c5dec1c3034f1ae3503efbf641ff3b0273b64797 introduced
__bio_copy_iov() to add bounce support to blk_rq_map_user_iov().
__bio_copy_iov() uses bio->bv_len to copy data back to user space for
READ commands after completion, but that breaks when a request
completes only partially: partial completion adjusts the bv_len of the
affected segments, so the copy-back no longer covers the lengths that
were originally mapped. Use the original segment lengths saved in
bmd->iovecs instead. SCSI always completes a PC request as a whole,
but it seems some other drivers do not.
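
For context, the bmd->iovecs array consumed here appears to come from
the parent commit 76029ff37f31, which made the equivalent bv_len fix
on the bio_copy_kern() path. Roughly, the mapping path snapshots
bio->bi_io_vec before the request is issued, so the original lengths
survive a partial completion. A sketch of that helper, reconstructed
from the surrounding code of that era rather than quoted verbatim:

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
			     struct sg_iovec *iov, int iov_count)
{
	/* Snapshot the bio_vec array while bv_len still holds the
	 * originally mapped length of each segment; a later partial
	 * completion may shrink bio->bi_io_vec[i].bv_len. */
	memcpy(bmd->iovecs, bio->bi_io_vec,
	       sizeof(struct bio_vec) * bio->bi_vcnt);
	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
	bmd->nr_sgvecs = iov_count;
	bio->bi_private = bmd;
}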
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'fs/bio.c')
 fs/bio.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -492,8 +492,8 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
 	return NULL;
 }
 
-static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
-			  int uncopy)
+static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
+			  struct sg_iovec *iov, int iov_count, int uncopy)
 {
 	int ret = 0, i;
 	struct bio_vec *bvec;
@@ -503,7 +503,7 @@ static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
 
 	__bio_for_each_segment(bvec, bio, i, 0) {
 		char *bv_addr = page_address(bvec->bv_page);
-		unsigned int bv_len = bvec->bv_len;
+		unsigned int bv_len = iovecs[i].bv_len;
 
 		while (bv_len && iov_idx < iov_count) {
 			unsigned int bytes;
@@ -555,7 +555,7 @@ int bio_uncopy_user(struct bio *bio)
 	struct bio_map_data *bmd = bio->bi_private;
 	int ret;
 
-	ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, 1);
+	ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
 
 	bio_free_map_data(bmd);
 	bio_put(bio);
@@ -634,7 +634,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 	 * success
 	 */
 	if (!write_to_vm) {
-		ret = __bio_copy_iov(bio, iov, iov_count, 0);
+		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
 		if (ret)
 			goto cleanup;
 	}
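
Note the asymmetry between the two call sites: bio_uncopy_user() runs
after completion and therefore passes the saved bmd->iovecs, while
bio_copy_user_iov() copies data in before any I/O has run, so the live
bio->bi_io_vec lengths are still correct there. To illustrate why the
snapshot matters, here is a small userspace simulation; it is a hedged
sketch with stand-in types and made-up sizes, not kernel code:

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for struct bio_vec; only the length matters here. */
struct sim_bio_vec { unsigned int bv_len; };

int main(void)
{
	/* Two mapped segments of 4 KiB each. */
	struct sim_bio_vec live[2] = { { 4096 }, { 4096 } };
	struct sim_bio_vec snapshot[2];

	/* What the mapping path models: save the lengths at map time. */
	memcpy(snapshot, live, sizeof(live));

	/* A partial completion of 1 KiB shrinks the live bv_len, the
	 * way the block layer adjusts a partly finished segment. */
	live[0].bv_len -= 1024;

	unsigned int from_live = live[0].bv_len + live[1].bv_len;
	unsigned int from_snap = snapshot[0].bv_len + snapshot[1].bv_len;

	/* Copying back with the live lengths would miss 1 KiB of the
	 * user buffer; the snapshot still covers the full mapping. */
	printf("copy-back using live bv_len:   %u bytes\n", from_live); /* 7168 */
	printf("copy-back using saved lengths: %u bytes\n", from_snap); /* 8192 */
	return 0;
}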