author		Kent Overstreet <kmo@daterainc.com>	2015-01-18 10:16:31 -0500
committer	Jens Axboe <axboe@fb.com>	2015-02-05 11:30:40 -0500
commit		26e49cfc7e988a76bf1e55cef0d9e438e5489180 (patch)
tree		43a2a3590738ef4097b63719e35fcc761a66e844 /block
parent		1dfa0f68c040080c5fefa7211b4ec34d202f8570 (diff)
block: pass iov_iter to the BLOCK_PC mapping functions
Make use of a new interface provided by iov_iter, backed by scatter-gather
list of iovec, instead of the old interface based on sg_iovec. Also use
iov_iter_advance() instead of manual iteration. This commit should contain
only literal replacements, without functional changes.

Cc: Christoph Hellwig <hch@infradead.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Doug Gilbert <dgilbert@interlog.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
[dpark: add more description in commit message]
Signed-off-by: Dongsu Park <dongsu.park@profitbricks.com>
[hch: fixed to do a deep clone of the iov_iter, and to properly use
 the iov_iter direction]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
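In practice the change collapses the old (iov, iov_count, len, write_to_vm)
argument bundle into a single struct iov_iter carrying the segments, the
total byte count and the direction. A minimal caller-side sketch, modeled on
the blk_rq_map_user() hunk further down; the wrapper name map_single_buffer()
is illustrative only and not part of the patch:

#include <linux/blkdev.h>
#include <linux/uio.h>

/*
 * Sketch only: mirrors how blk_rq_map_user() builds an iov_iter in this
 * patch.  One user buffer becomes a single-segment iovec; iov_iter_init()
 * then records the segments, direction and total length, so the mapping
 * functions no longer need separate iov_count/len/write_to_vm parameters.
 */
static int map_single_buffer(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;

	iov.iov_base = ubuf;
	iov.iov_len = len;
	iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);

	/* the iterator is now the only mapping argument besides map_data/gfp */
	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}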
Diffstat (limited to 'block')
-rw-r--r--	block/bio.c	146
-rw-r--r--	block/blk-map.c	38
-rw-r--r--	block/scsi_ioctl.c	17
3 files changed, 90 insertions, 111 deletions
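The iteration change itself is easiest to see in __bio_copy_iov(): instead of
stepping an index and an offset through an sg_iovec array by hand, the new
code takes a local copy of the caller's iov_iter and walks it with
iov_iter_iovec() and iov_iter_advance(). A reduced sketch of that loop,
simplified from the bio.c hunk below; the function name copy_loop_sketch() is
hypothetical and the copy_to_user()/copy_from_user() branches are elided:

#include <linux/bio.h>
#include <linux/uio.h>

/*
 * Reduced sketch of the new __bio_copy_iov() loop.  A local copy of the
 * caller's iov_iter is consumed segment by segment; iov_iter_advance()
 * replaces the old manual iov_idx/iov_off bookkeeping.
 */
static void copy_loop_sketch(struct bio *bio, const struct iov_iter *iter)
{
	struct iov_iter iov_iter = *iter;	/* local copy, caller's iter stays put */
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		unsigned int bv_len = bvec->bv_len;

		while (bv_len && iov_iter.count) {
			/* current user segment, clipped against this bvec */
			struct iovec iov = iov_iter_iovec(&iov_iter);
			unsigned int bytes = min_t(unsigned int, bv_len, iov.iov_len);

			/* ... copy 'bytes' between bvec->bv_page and iov.iov_base ... */

			bv_len -= bytes;
			iov_iter_advance(&iov_iter, bytes);
		}
	}
}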
diff --git a/block/bio.c b/block/bio.c
index 0895f694f440..7d8c6555e3f3 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -28,7 +28,6 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
-#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include <trace/events/block.h>
 
@@ -1022,21 +1021,11 @@ void bio_copy_data(struct bio *dst, struct bio *src)
 EXPORT_SYMBOL(bio_copy_data);
 
 struct bio_map_data {
-	int nr_sgvecs;
 	int is_our_pages;
-	struct sg_iovec sgvecs[];
+	struct iov_iter iter;
+	struct iovec iov[];
 };
 
-static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
-			     const struct sg_iovec *iov, int iov_count,
-			     int is_our_pages)
-{
-	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
-	bmd->nr_sgvecs = iov_count;
-	bmd->is_our_pages = is_our_pages;
-	bio->bi_private = bmd;
-}
-
 static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
 					       gfp_t gfp_mask)
 {
@@ -1044,36 +1033,33 @@ static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
 		return NULL;
 
 	return kmalloc(sizeof(struct bio_map_data) +
-		       sizeof(struct sg_iovec) * iov_count, gfp_mask);
+		       sizeof(struct iovec) * iov_count, gfp_mask);
 }
 
-static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
+static int __bio_copy_iov(struct bio *bio, const struct iov_iter *iter,
 			  int to_user, int from_user)
 {
 	int ret = 0, i;
 	struct bio_vec *bvec;
-	int iov_idx = 0;
-	unsigned int iov_off = 0;
+	struct iov_iter iov_iter = *iter;
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		char *bv_addr = page_address(bvec->bv_page);
 		unsigned int bv_len = bvec->bv_len;
 
-		while (bv_len && iov_idx < iov_count) {
-			unsigned int bytes;
-			char __user *iov_addr;
-
-			bytes = min_t(unsigned int,
-				      iov[iov_idx].iov_len - iov_off, bv_len);
-			iov_addr = iov[iov_idx].iov_base + iov_off;
+		while (bv_len && iov_iter.count) {
+			struct iovec iov = iov_iter_iovec(&iov_iter);
+			unsigned int bytes = min_t(unsigned int, bv_len,
+						   iov.iov_len);
 
 			if (!ret) {
 				if (to_user)
-					ret = copy_to_user(iov_addr, bv_addr,
-							   bytes);
+					ret = copy_to_user(iov.iov_base,
+							   bv_addr, bytes);
 
 				if (from_user)
-					ret = copy_from_user(bv_addr, iov_addr,
+					ret = copy_from_user(bv_addr,
+							     iov.iov_base,
 							     bytes);
 
 				if (ret)
@@ -1082,13 +1068,7 @@ static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_c
 
 			bv_len -= bytes;
 			bv_addr += bytes;
-			iov_addr += bytes;
-			iov_off += bytes;
-
-			if (iov[iov_idx].iov_len == iov_off) {
-				iov_idx++;
-				iov_off = 0;
-			}
+			iov_iter_advance(&iov_iter, bytes);
 		}
 	}
 
@@ -1122,7 +1102,7 @@ int bio_uncopy_user(struct bio *bio)
 	 * don't copy into a random user address space, just free.
 	 */
 	if (current->mm)
-		ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
+		ret = __bio_copy_iov(bio, &bmd->iter,
 				     bio_data_dir(bio) == READ, 0);
 	if (bmd->is_our_pages)
 		bio_free_pages(bio);
@@ -1135,12 +1115,10 @@ EXPORT_SYMBOL(bio_uncopy_user);
 
 /**
  *	bio_copy_user_iov	-	copy user data to bio
  *	@q:		destination block queue
  *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
- *	@iov:		the iovec.
- *	@iov_count:	number of elements in the iovec
- *	@write_to_vm:	bool indicating writing to pages or not
- *	@gfp_mask:	memory allocation flags
+ *	@iter:		iovec iterator
+ *	@gfp_mask:	memory allocation flags
  *
  *	Prepares and returns a bio for indirect user io, bouncing data
  *	to/from kernel pages as necessary. Must be paired with
@@ -1148,24 +1126,25 @@ EXPORT_SYMBOL(bio_uncopy_user);
  */
 struct bio *bio_copy_user_iov(struct request_queue *q,
 			      struct rq_map_data *map_data,
-			      const struct sg_iovec *iov, int iov_count,
-			      int write_to_vm, gfp_t gfp_mask)
+			      const struct iov_iter *iter,
+			      gfp_t gfp_mask)
 {
 	struct bio_map_data *bmd;
 	struct page *page;
 	struct bio *bio;
 	int i, ret;
 	int nr_pages = 0;
-	unsigned int len = 0;
+	unsigned int len = iter->count;
 	unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
 
-	for (i = 0; i < iov_count; i++) {
+	for (i = 0; i < iter->nr_segs; i++) {
 		unsigned long uaddr;
 		unsigned long end;
 		unsigned long start;
 
-		uaddr = (unsigned long)iov[i].iov_base;
-		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		uaddr = (unsigned long) iter->iov[i].iov_base;
+		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
+			>> PAGE_SHIFT;
 		start = uaddr >> PAGE_SHIFT;
 
 		/*
@@ -1175,22 +1154,31 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 			return ERR_PTR(-EINVAL);
 
 		nr_pages += end - start;
-		len += iov[i].iov_len;
 	}
 
 	if (offset)
 		nr_pages++;
 
-	bmd = bio_alloc_map_data(iov_count, gfp_mask);
+	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
 	if (!bmd)
 		return ERR_PTR(-ENOMEM);
 
+	/*
+	 * We need to do a deep copy of the iov_iter including the iovecs.
+	 * The caller provided iov might point to an on-stack or otherwise
+	 * shortlived one.
+	 */
+	bmd->is_our_pages = map_data ? 0 : 1;
+	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
+	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
+		      iter->nr_segs, iter->count);
+
 	ret = -ENOMEM;
 	bio = bio_kmalloc(gfp_mask, nr_pages);
 	if (!bio)
 		goto out_bmd;
 
-	if (!write_to_vm)
+	if (iter->type & WRITE)
 		bio->bi_rw |= REQ_WRITE;
 
 	ret = 0;
@@ -1238,14 +1226,14 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	/*
 	 * success
 	 */
-	if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
+	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
 	    (map_data && map_data->from_user)) {
-		ret = __bio_copy_iov(bio, iov, iov_count, 0, 1);
+		ret = __bio_copy_iov(bio, iter, 0, 1);
 		if (ret)
 			goto cleanup;
 	}
 
-	bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
+	bio->bi_private = bmd;
 	return bio;
 cleanup:
 	if (!map_data)
@@ -1258,19 +1246,21 @@ out_bmd:
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
-				      const struct sg_iovec *iov, int iov_count,
-				      int write_to_vm, gfp_t gfp_mask)
+				      const struct iov_iter *iter,
+				      gfp_t gfp_mask)
 {
-	int i, j;
+	int j;
 	int nr_pages = 0;
 	struct page **pages;
 	struct bio *bio;
 	int cur_page = 0;
 	int ret, offset;
+	struct iov_iter i;
+	struct iovec iov;
 
-	for (i = 0; i < iov_count; i++) {
-		unsigned long uaddr = (unsigned long)iov[i].iov_base;
-		unsigned long len = iov[i].iov_len;
+	iov_for_each(iov, i, *iter) {
+		unsigned long uaddr = (unsigned long) iov.iov_base;
+		unsigned long len = iov.iov_len;
 		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long start = uaddr >> PAGE_SHIFT;
 
@@ -1300,16 +1290,17 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 	if (!pages)
 		goto out;
 
-	for (i = 0; i < iov_count; i++) {
-		unsigned long uaddr = (unsigned long)iov[i].iov_base;
-		unsigned long len = iov[i].iov_len;
+	iov_for_each(iov, i, *iter) {
+		unsigned long uaddr = (unsigned long) iov.iov_base;
+		unsigned long len = iov.iov_len;
 		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long start = uaddr >> PAGE_SHIFT;
 		const int local_nr_pages = end - start;
 		const int page_limit = cur_page + local_nr_pages;
 
 		ret = get_user_pages_fast(uaddr, local_nr_pages,
-				write_to_vm, &pages[cur_page]);
+				(iter->type & WRITE) != WRITE,
+				&pages[cur_page]);
 		if (ret < local_nr_pages) {
 			ret = -EFAULT;
 			goto out_unmap;
@@ -1349,7 +1340,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 	/*
 	 * set data direction, and check if mapped pages need bouncing
 	 */
-	if (!write_to_vm)
+	if (iter->type & WRITE)
 		bio->bi_rw |= REQ_WRITE;
 
 	bio->bi_bdev = bdev;
@@ -1357,10 +1348,10 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 	return bio;
 
  out_unmap:
-	for (i = 0; i < nr_pages; i++) {
-		if(!pages[i])
+	for (j = 0; j < nr_pages; j++) {
+		if (!pages[j])
 			break;
-		page_cache_release(pages[i]);
+		page_cache_release(pages[j]);
 	}
  out:
 	kfree(pages);
@@ -1369,25 +1360,22 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 }
 
 /**
- *	bio_map_user_iov - map user sg_iovec table into bio
+ *	bio_map_user_iov - map user iovec into bio
  *	@q:		the struct request_queue for the bio
  *	@bdev:		destination block device
- *	@iov:		the iovec.
- *	@iov_count:	number of elements in the iovec
- *	@write_to_vm:	bool indicating writing to pages or not
- *	@gfp_mask:	memory allocation flags
+ *	@iter:		iovec iterator
+ *	@gfp_mask:	memory allocation flags
  *
  *	Map the user space address into a bio suitable for io to a block
  *	device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
-			     const struct sg_iovec *iov, int iov_count,
-			     int write_to_vm, gfp_t gfp_mask)
+			     const struct iov_iter *iter,
+			     gfp_t gfp_mask)
 {
 	struct bio *bio;
 
-	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
-				 gfp_mask);
+	bio = __bio_map_user_iov(q, bdev, iter, gfp_mask);
 	if (IS_ERR(bio))
 		return bio;
 
diff --git a/block/blk-map.c b/block/blk-map.c
index 152a5fe5d85e..30e6bb871c5c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,7 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <scsi/sg.h>		/* for struct sg_iovec */
+#include <linux/uio.h>
 
 #include "blk.h"
 
@@ -44,9 +44,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
  * @q:		request queue where request should be inserted
  * @rq:		request to map data to
  * @map_data:	pointer to the rq_map_data holding pages (if necessary)
- * @iov:	pointer to the iovec
- * @iov_count:	number of elements in the iovec
- * @len:	I/O byte count
+ * @iter:	iovec iterator
  * @gfp_mask:	memory allocation flags
  *
  * Description:
@@ -63,20 +61,21 @@ static int __blk_rq_unmap_user(struct bio *bio)
  * unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-			struct rq_map_data *map_data, const struct sg_iovec *iov,
-			int iov_count, unsigned int len, gfp_t gfp_mask)
+			struct rq_map_data *map_data,
+			const struct iov_iter *iter, gfp_t gfp_mask)
 {
 	struct bio *bio;
-	int i, read = rq_data_dir(rq) == READ;
 	int unaligned = 0;
+	struct iov_iter i;
+	struct iovec iov;
 
-	if (!iov || iov_count <= 0)
+	if (!iter || !iter->count)
 		return -EINVAL;
 
-	for (i = 0; i < iov_count; i++) {
-		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+	iov_for_each(iov, i, *iter) {
+		unsigned long uaddr = (unsigned long) iov.iov_base;
 
-		if (!iov[i].iov_len)
+		if (!iov.iov_len)
 			return -EINVAL;
 
 		/*
@@ -86,16 +85,15 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			unaligned = 1;
 	}
 
-	if (unaligned || (q->dma_pad_mask & len) || map_data)
-		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
-					gfp_mask);
+	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
+		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
 	else
-		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
+		bio = bio_map_user_iov(q, NULL, iter, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	if (bio->bi_iter.bi_size != len) {
+	if (bio->bi_iter.bi_size != iter->count) {
 		/*
 		 * Grab an extra reference to this bio, as bio_unmap_user()
 		 * expects to be able to drop it twice as it happens on the
@@ -121,12 +119,14 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		    struct rq_map_data *map_data, void __user *ubuf,
 		    unsigned long len, gfp_t gfp_mask)
 {
-	struct sg_iovec iov;
+	struct iovec iov;
+	struct iov_iter i;
 
-	iov.iov_base = (void __user *)ubuf;
+	iov.iov_base = ubuf;
 	iov.iov_len = len;
+	iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);
 
-	return blk_rq_map_user_iov(q, rq, map_data, &iov, 1, len, gfp_mask);
+	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
 }
 EXPORT_SYMBOL(blk_rq_map_user);
 
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 28163fad3c5d..e1f71c396193 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -332,7 +332,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
 	ret = 0;
 	if (hdr->iovec_count) {
-		size_t iov_data_len;
+		struct iov_iter i;
 		struct iovec *iov = NULL;
 
 		ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
@@ -342,20 +342,11 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 			goto out_free_cdb;
 		}
 
-		iov_data_len = ret;
-		ret = 0;
-
 		/* SG_IO howto says that the shorter of the two wins */
-		if (hdr->dxfer_len < iov_data_len) {
-			hdr->iovec_count = iov_shorten(iov,
-						       hdr->iovec_count,
-						       hdr->dxfer_len);
-			iov_data_len = hdr->dxfer_len;
-		}
+		iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count,
+			      min_t(unsigned, ret, hdr->dxfer_len));
 
-		ret = blk_rq_map_user_iov(q, rq, NULL, (struct sg_iovec *) iov,
-					  hdr->iovec_count,
-					  iov_data_len, GFP_KERNEL);
+		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 		kfree(iov);
 	} else if (hdr->dxfer_len)
 		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,