diff options
author | Christoph Hellwig <hch@lst.de> | 2015-01-18 10:16:30 -0500 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2015-02-05 11:30:39 -0500 |
commit | 1dfa0f68c040080c5fefa7211b4ec34d202f8570 (patch) | |
tree | 572a04efee9750a118a43579fef0381dd9997461 /block/bio.c | |
parent | ddad8dd0a162fde61646a627a3017c258601dc8a (diff) |
block: add a helper to free bio bounce buffer pages
The code snippet to walk all bio_vecs and free their pages is open-coded in
way too many places, so factor it into a helper. Also convert the slightly
more complex cases in bio_copy_kern_endio and __bio_copy_iov where we break
the freeing from an existing loop into a separate one.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/bio.c')
-rw-r--r-- | block/bio.c | 65 |
1 files changed, 33 insertions, 32 deletions
diff --git a/block/bio.c b/block/bio.c index 879921e6b049..0895f694f440 100644 --- a/block/bio.c +++ b/block/bio.c | |||
@@ -1048,7 +1048,7 @@ static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count, | |||
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count, | 1050 | static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count, |
1051 | int to_user, int from_user, int do_free_page) | 1051 | int to_user, int from_user) |
1052 | { | 1052 | { |
1053 | int ret = 0, i; | 1053 | int ret = 0, i; |
1054 | struct bio_vec *bvec; | 1054 | struct bio_vec *bvec; |
@@ -1090,14 +1090,20 @@ static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_c | |||
1090 | iov_off = 0; | 1090 | iov_off = 0; |
1091 | } | 1091 | } |
1092 | } | 1092 | } |
1093 | |||
1094 | if (do_free_page) | ||
1095 | __free_page(bvec->bv_page); | ||
1096 | } | 1093 | } |
1097 | 1094 | ||
1098 | return ret; | 1095 | return ret; |
1099 | } | 1096 | } |
1100 | 1097 | ||
1098 | static void bio_free_pages(struct bio *bio) | ||
1099 | { | ||
1100 | struct bio_vec *bvec; | ||
1101 | int i; | ||
1102 | |||
1103 | bio_for_each_segment_all(bvec, bio, i) | ||
1104 | __free_page(bvec->bv_page); | ||
1105 | } | ||
1106 | |||
1101 | /** | 1107 | /** |
1102 | * bio_uncopy_user - finish previously mapped bio | 1108 | * bio_uncopy_user - finish previously mapped bio |
1103 | * @bio: bio being terminated | 1109 | * @bio: bio being terminated |
@@ -1108,8 +1114,7 @@ static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_c | |||
1108 | int bio_uncopy_user(struct bio *bio) | 1114 | int bio_uncopy_user(struct bio *bio) |
1109 | { | 1115 | { |
1110 | struct bio_map_data *bmd = bio->bi_private; | 1116 | struct bio_map_data *bmd = bio->bi_private; |
1111 | struct bio_vec *bvec; | 1117 | int ret = 0; |
1112 | int ret = 0, i; | ||
1113 | 1118 | ||
1114 | if (!bio_flagged(bio, BIO_NULL_MAPPED)) { | 1119 | if (!bio_flagged(bio, BIO_NULL_MAPPED)) { |
1115 | /* | 1120 | /* |
@@ -1118,11 +1123,9 @@ int bio_uncopy_user(struct bio *bio) | |||
1118 | */ | 1123 | */ |
1119 | if (current->mm) | 1124 | if (current->mm) |
1120 | ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, | 1125 | ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, |
1121 | bio_data_dir(bio) == READ, | 1126 | bio_data_dir(bio) == READ, 0); |
1122 | 0, bmd->is_our_pages); | 1127 | if (bmd->is_our_pages) |
1123 | else if (bmd->is_our_pages) | 1128 | bio_free_pages(bio); |
1124 | bio_for_each_segment_all(bvec, bio, i) | ||
1125 | __free_page(bvec->bv_page); | ||
1126 | } | 1129 | } |
1127 | kfree(bmd); | 1130 | kfree(bmd); |
1128 | bio_put(bio); | 1131 | bio_put(bio); |
@@ -1149,7 +1152,6 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
1149 | int write_to_vm, gfp_t gfp_mask) | 1152 | int write_to_vm, gfp_t gfp_mask) |
1150 | { | 1153 | { |
1151 | struct bio_map_data *bmd; | 1154 | struct bio_map_data *bmd; |
1152 | struct bio_vec *bvec; | ||
1153 | struct page *page; | 1155 | struct page *page; |
1154 | struct bio *bio; | 1156 | struct bio *bio; |
1155 | int i, ret; | 1157 | int i, ret; |
@@ -1238,7 +1240,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
1238 | */ | 1240 | */ |
1239 | if ((!write_to_vm && (!map_data || !map_data->null_mapped)) || | 1241 | if ((!write_to_vm && (!map_data || !map_data->null_mapped)) || |
1240 | (map_data && map_data->from_user)) { | 1242 | (map_data && map_data->from_user)) { |
1241 | ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0); | 1243 | ret = __bio_copy_iov(bio, iov, iov_count, 0, 1); |
1242 | if (ret) | 1244 | if (ret) |
1243 | goto cleanup; | 1245 | goto cleanup; |
1244 | } | 1246 | } |
@@ -1247,9 +1249,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
1247 | return bio; | 1249 | return bio; |
1248 | cleanup: | 1250 | cleanup: |
1249 | if (!map_data) | 1251 | if (!map_data) |
1250 | bio_for_each_segment_all(bvec, bio, i) | 1252 | bio_free_pages(bio); |
1251 | __free_page(bvec->bv_page); | ||
1252 | |||
1253 | bio_put(bio); | 1253 | bio_put(bio); |
1254 | out_bmd: | 1254 | out_bmd: |
1255 | kfree(bmd); | 1255 | kfree(bmd); |
@@ -1510,22 +1510,22 @@ EXPORT_SYMBOL(bio_map_kern); | |||
1510 | 1510 | ||
1511 | static void bio_copy_kern_endio(struct bio *bio, int err) | 1511 | static void bio_copy_kern_endio(struct bio *bio, int err) |
1512 | { | 1512 | { |
1513 | struct bio_vec *bvec; | 1513 | bio_free_pages(bio); |
1514 | const int read = bio_data_dir(bio) == READ; | 1514 | bio_put(bio); |
1515 | } | ||
1516 | |||
1517 | static void bio_copy_kern_endio_read(struct bio *bio, int err) | ||
1518 | { | ||
1515 | char *p = bio->bi_private; | 1519 | char *p = bio->bi_private; |
1520 | struct bio_vec *bvec; | ||
1516 | int i; | 1521 | int i; |
1517 | 1522 | ||
1518 | bio_for_each_segment_all(bvec, bio, i) { | 1523 | bio_for_each_segment_all(bvec, bio, i) { |
1519 | char *addr = page_address(bvec->bv_page); | 1524 | memcpy(p, page_address(bvec->bv_page), bvec->bv_len); |
1520 | |||
1521 | if (read) | ||
1522 | memcpy(p, addr, bvec->bv_len); | ||
1523 | |||
1524 | __free_page(bvec->bv_page); | ||
1525 | p += bvec->bv_len; | 1525 | p += bvec->bv_len; |
1526 | } | 1526 | } |
1527 | 1527 | ||
1528 | bio_put(bio); | 1528 | bio_copy_kern_endio(bio, err); |
1529 | } | 1529 | } |
1530 | 1530 | ||
1531 | /** | 1531 | /** |
@@ -1545,10 +1545,9 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, | |||
1545 | unsigned long kaddr = (unsigned long)data; | 1545 | unsigned long kaddr = (unsigned long)data; |
1546 | unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1546 | unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1547 | unsigned long start = kaddr >> PAGE_SHIFT; | 1547 | unsigned long start = kaddr >> PAGE_SHIFT; |
1548 | struct bio_vec *bvec; | ||
1549 | struct bio *bio; | 1548 | struct bio *bio; |
1550 | void *p = data; | 1549 | void *p = data; |
1551 | int nr_pages = 0, i; | 1550 | int nr_pages = 0; |
1552 | 1551 | ||
1553 | /* | 1552 | /* |
1554 | * Overflow, abort | 1553 | * Overflow, abort |
@@ -1582,16 +1581,18 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, | |||
1582 | p += bytes; | 1581 | p += bytes; |
1583 | } | 1582 | } |
1584 | 1583 | ||
1585 | if (!reading) | 1584 | if (reading) { |
1585 | bio->bi_end_io = bio_copy_kern_endio_read; | ||
1586 | bio->bi_private = data; | ||
1587 | } else { | ||
1588 | bio->bi_end_io = bio_copy_kern_endio; | ||
1586 | bio->bi_rw |= REQ_WRITE; | 1589 | bio->bi_rw |= REQ_WRITE; |
1590 | } | ||
1587 | 1591 | ||
1588 | bio->bi_private = data; | ||
1589 | bio->bi_end_io = bio_copy_kern_endio; | ||
1590 | return bio; | 1592 | return bio; |
1591 | 1593 | ||
1592 | cleanup: | 1594 | cleanup: |
1593 | bio_for_each_segment_all(bvec, bio, i) | 1595 | bio_free_pages(bio); |
1594 | __free_page(bvec->bv_page); | ||
1595 | bio_put(bio); | 1596 | bio_put(bio); |
1596 | return ERR_PTR(-ENOMEM); | 1597 | return ERR_PTR(-ENOMEM); |
1597 | } | 1598 | } |