author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-08-25 14:36:08 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-08-27 03:50:19 -0400
commit	76029ff37f31dad64641489c610d98955217bb68 (patch)
tree	e8f729fa6288676be357295141a0950f171a62d8	/fs/bio.c
parent	48fd4f93a00eac844678629f2f00518e146ed30d (diff)
bio: fix bio_copy_kern() handling of bio->bv_len
Commit 68154e90c9d1492d570671ae181d9a8f8530da55 introduced bio_copy_kern() to add bounce-buffer support to blk_rq_map_kern(). After completion, bio_copy_kern() uses each bio_vec's bv_len to copy data back for READ commands, but that doesn't work for a request that has partially completed, because partial completion can shrink bv_len. SCSI always completes a PC request as a whole, but it seems some drivers don't.

This patch fixes bio_copy_kern() to handle the above case: as bio_copy_user() does, bio_copy_kern() now uses struct bio_map_data to store the original struct bio_vec entries.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reported-by: Nix <nix@esperi.org.uk>
Tested-by: Nix <nix@esperi.org.uk>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
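To illustrate the bug, here is a minimal before/after sketch of the completion handler, condensed from the patch below. The names endio_buggy and endio_fixed are hypothetical, and the READ-only check and page freeing from the real handler are omitted for brevity. The point: after a partial completion the block layer may have shrunk bvec->bv_len, so only the lengths snapshotted into struct bio_map_data at setup time are reliable.

/* Sketch only: simplified from the patch below. */
static void endio_buggy(struct bio *bio, int err)
{
	char *p = bio->bi_private;	/* raw kernel buffer */
	struct bio_vec *bvec;
	int i;

	__bio_for_each_segment(bvec, bio, i, 0) {
		/* bv_len may have been reduced by a partial completion,
		 * so too little data is copied here and every following
		 * segment lands at the wrong offset in the buffer. */
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}
}

static void endio_fixed(struct bio *bio, int err)
{
	struct bio_map_data *bmd = bio->bi_private;
	char *p = bmd->sgvecs[0].iov_base;	/* original buffer */
	struct bio_vec *bvec;
	int i;

	__bio_for_each_segment(bvec, bio, i, 0) {
		/* use the length saved at setup, not today's bv_len */
		int len = bmd->iovecs[i].bv_len;

		memcpy(p, page_address(bvec->bv_page), len);
		p += len;
	}
}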
Diffstat (limited to 'fs/bio.c')
 fs/bio.c | 38 ++++++++++++++++++++++++++++----------
 1 file changed, 28 insertions(+), 10 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index 8000e2fa16cb..8b1f5ee6f83c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -469,20 +469,21 @@ static void bio_free_map_data(struct bio_map_data *bmd)
 	kfree(bmd);
 }
 
-static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
+static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
+					       gfp_t gfp_mask)
 {
-	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
+	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
 
 	if (!bmd)
 		return NULL;
 
-	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
+	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
 	if (!bmd->iovecs) {
 		kfree(bmd);
 		return NULL;
 	}
 
-	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, GFP_KERNEL);
+	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
 	if (bmd->sgvecs)
 		return bmd;
 
@@ -596,7 +597,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
 		len += iov[i].iov_len;
 	}
 
-	bmd = bio_alloc_map_data(nr_pages, iov_count);
+	bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
 	if (!bmd)
 		return ERR_PTR(-ENOMEM);
 
@@ -942,19 +943,22 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 {
 	struct bio_vec *bvec;
 	const int read = bio_data_dir(bio) == READ;
-	char *p = bio->bi_private;
+	struct bio_map_data *bmd = bio->bi_private;
 	int i;
+	char *p = bmd->sgvecs[0].iov_base;
 
 	__bio_for_each_segment(bvec, bio, i, 0) {
 		char *addr = page_address(bvec->bv_page);
+		int len = bmd->iovecs[i].bv_len;
 
 		if (read && !err)
-			memcpy(p, addr, bvec->bv_len);
+			memcpy(p, addr, len);
 
 		__free_page(bvec->bv_page);
-		p += bvec->bv_len;
+		p += len;
 	}
 
+	bio_free_map_data(bmd);
 	bio_put(bio);
 }
 
@@ -978,11 +982,21 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 	const int nr_pages = end - start;
 	struct bio *bio;
 	struct bio_vec *bvec;
+	struct bio_map_data *bmd;
 	int i, ret;
+	struct sg_iovec iov;
+
+	iov.iov_base = data;
+	iov.iov_len = len;
+
+	bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
+	if (!bmd)
+		return ERR_PTR(-ENOMEM);
 
+	ret = -ENOMEM;
 	bio = bio_alloc(gfp_mask, nr_pages);
 	if (!bio)
-		return ERR_PTR(-ENOMEM);
+		goto out_bmd;
 
 	while (len) {
 		struct page *page;
@@ -1016,14 +1030,18 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 		}
 	}
 
-	bio->bi_private = data;
+	bio->bi_private = bmd;
 	bio->bi_end_io = bio_copy_kern_endio;
+
+	bio_set_map_data(bmd, bio, &iov, 1);
 	return bio;
 cleanup:
 	bio_for_each_segment(bvec, bio, i)
 		__free_page(bvec->bv_page);
 
 	bio_put(bio);
+out_bmd:
+	bio_free_map_data(bmd);
 
 	return ERR_PTR(ret);
 }
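For context, a driver reaches this path by mapping a raw kernel buffer into a request: when the queue needs bounce buffering, blk_rq_map_kern() builds the bio via bio_copy_kern(), and the copy-back fixed above runs at completion. A hypothetical caller, sketched against the 2008-era block API (send_pc_command and its parameters are made up for illustration):

/* Sketch only: issue a SCSI passthrough (PC) request over a kernel
 * buffer.  If the queue needs bouncing, blk_rq_map_kern() takes the
 * bio_copy_kern() path patched above. */
static int send_pc_command(struct request_queue *q, struct gendisk *disk,
			   unsigned char *cdb, int cdb_len,
			   void *buf, unsigned int buf_len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;

	err = blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
	if (err)
		goto out;

	/* READ data is copied back to buf in bio_copy_kern_endio() */
	err = blk_execute_rq(q, disk, rq, 0);
out:
	blk_put_request(rq);
	return err;
}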