author		Al Viro <viro@zeniv.linux.org.uk>	2017-09-24 10:21:15 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2017-10-10 23:55:14 -0400
commit		1cfd0ddd82232804e03f3023f6a58b50dfef0574
tree		d947cd8a4539b38383065b80d1cc1e3dff6df948
parent		2b04e8f6bbb196cab4b232af0f8d48ff2c7a8058
bio_copy_user_iov(): don't ignore ->iov_offset
Since "block: support large requests in blk_rq_map_user_iov" we
started to call it with partially drained iter; that works fine
on the write side, but reads create a copy of iter for completion
time. And that needs to take the possibility of ->iov_iter != 0
into account...
Cc: stable@vger.kernel.org #v4.5+
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
 block/bio.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 9e9606d26cc6..101c2a9b5481 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	 */
 	bmd->is_our_pages = map_data ? 0 : 1;
 	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
-	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
-			iter->nr_segs, iter->count);
+	bmd->iter = *iter;
+	bmd->iter.iov = bmd->iov;
 
 	ret = -ENOMEM;
 	bio = bio_kmalloc(gfp_mask, nr_pages);
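
For context on why the struct copy matters: iov_iter_init() builds a fresh
iterator that starts at offset zero in its first segment, whereas assigning
the whole struct carries over ->iov_offset and ->count from the partially
drained source iter before ->iov is repointed at bmd's private copy of the
iovec array. The sketch below is a simplified userspace model of that
difference; struct iter_model and its fields are stand-ins for illustration,
not the kernel's struct iov_iter.

/*
 * Simplified model: rebuilding an iterator from (segments, count) loses a
 * partially drained position, while copying the whole struct keeps it.
 */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

struct iter_model {
	const struct iovec *iov;   /* current segment */
	unsigned long nr_segs;     /* segments remaining */
	size_t iov_offset;         /* bytes already consumed in *iov */
	size_t count;              /* bytes left to transfer */
};

int main(void)
{
	char buf[8];
	struct iovec iov[1] = { { .iov_base = buf, .iov_len = sizeof(buf) } };

	/* the caller has already drained 3 bytes before handing the iter down */
	struct iter_model src = { iov, 1, 3, sizeof(buf) - 3 };

	struct iovec priv[1];                       /* analogue of bmd->iov */
	memcpy(priv, src.iov, sizeof(priv));

	/* old approach: re-initialise from segments + count, offset is lost */
	struct iter_model rebuilt = { priv, src.nr_segs, 0, src.count };

	/* fixed approach: copy the iter, then point it at the private copy */
	struct iter_model copied = src;
	copied.iov = priv;

	printf("rebuilt: offset=%zu count=%zu\n", rebuilt.iov_offset, rebuilt.count);
	printf("copied:  offset=%zu count=%zu\n", copied.iov_offset, copied.count);
	return 0;
}

Run in userspace, "rebuilt" reports offset 0 while "copied" reports offset 3,
which mirrors why bmd->iter has to be a struct copy of the caller's iter for
the read-completion path to copy data back to the right place.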