-rw-r--r--	block/blk-map.c     | 47
-rw-r--r--	include/linux/uio.h |  1
-rw-r--r--	lib/iov_iter.c      | 19
3 files changed, 28 insertions, 39 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index a54f0543b956..b9f88b7751fb 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -9,24 +9,6 @@
 
 #include "blk.h"
 
-static bool iovec_gap_to_prv(struct request_queue *q,
-		struct iovec *prv, struct iovec *cur)
-{
-	unsigned long prev_end;
-
-	if (!queue_virt_boundary(q))
-		return false;
-
-	if (prv->iov_base == NULL && prv->iov_len == 0)
-		/* prv is not set - don't check */
-		return false;
-
-	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
-
-	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
-		prev_end & queue_virt_boundary(q));
-}
-
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio)
 {
@@ -125,31 +107,18 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct rq_map_data *map_data,
 			const struct iov_iter *iter, gfp_t gfp_mask)
 {
-	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
-	bool copy = (q->dma_pad_mask & iter->count) || map_data;
+	bool copy = false;
+	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
 	struct bio *bio = NULL;
 	struct iov_iter i;
 	int ret;
 
-	if (!iter || !iter->count)
-		return -EINVAL;
-
-	iov_for_each(iov, i, *iter) {
-		unsigned long uaddr = (unsigned long) iov.iov_base;
-
-		if (!iov.iov_len)
-			return -EINVAL;
-
-		/*
-		 * Keep going so we check length of all segments
-		 */
-		if ((uaddr & queue_dma_alignment(q)) ||
-		    iovec_gap_to_prv(q, &prv, &iov))
-			copy = true;
-
-		prv.iov_base = iov.iov_base;
-		prv.iov_len = iov.iov_len;
-	}
+	if (map_data)
+		copy = true;
+	else if (iov_iter_alignment(iter) & align)
+		copy = true;
+	else if (queue_virt_boundary(q))
+		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
 	i = *iter;
 	do {
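
The rewritten blk_rq_map_user_iov() decides between mapping the user pages directly and bouncing through a copy with three checks: an explicit map_data, an iterator misaligned against the DMA pad and alignment masks, or segment junctions that violate the queue's virt boundary. A minimal user-space sketch of that last rule, assuming a hypothetical 4K boundary mask (illustration only, not kernel code):

/*
 * Stand-alone sketch of the "virt boundary" rule enforced above.
 * The mask and the sample iovecs are invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/uio.h>

/*
 * A bounce copy is needed when any junction between consecutive
 * segments is misaligned: the previous segment must end, and the
 * next must begin, on an address with no bits inside the boundary
 * mask set (the check the removed iovec_gap_to_prv() did pairwise).
 */
static bool needs_copy(const struct iovec *iov, int nr,
		       unsigned long boundary_mask)
{
	for (int k = 1; k < nr; k++) {
		unsigned long prev_end = (unsigned long)iov[k - 1].iov_base +
					 iov[k - 1].iov_len;
		unsigned long cur_start = (unsigned long)iov[k].iov_base;

		if ((prev_end | cur_start) & boundary_mask)
			return true;	/* junction not on the boundary */
	}
	return false;
}

int main(void)
{
	unsigned long mask = 0xfff;	/* hypothetical 4K virt boundary */
	struct iovec ok[2]  = { { (void *)0x10000, 0x1000 },
				{ (void *)0x20000, 0x200 } };
	struct iovec bad[2] = { { (void *)0x10000, 0x800 },
				{ (void *)0x20000, 0x200 } };

	printf("aligned junction:    copy=%d\n", needs_copy(ok, 2, mask));  /* 0 */
	printf("misaligned junction: copy=%d\n", needs_copy(bad, 2, mask)); /* 1 */
	return 0;
}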
diff --git a/include/linux/uio.h b/include/linux/uio.h
index fd9bcfedad42..1b5d1cd796e2 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -87,6 +87,7 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
 			unsigned long nr_segs, size_t count);
 void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 5fecddc32b1b..ca5316e0087b 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -569,6 +569,25 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 }
 EXPORT_SYMBOL(iov_iter_alignment);
 
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
+{
+	unsigned long res = 0;
+	size_t size = i->count;
+	if (!size)
+		return 0;
+
+	iterate_all_kinds(i, size, v,
+		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
+			(size != v.iov_len ? size : 0), 0),
+		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
+			(size != v.bv_len ? size : 0)),
+		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
+			(size != v.iov_len ? size : 0))
+	);
+	return res;
+}
+EXPORT_SYMBOL(iov_iter_gap_alignment);
+
 ssize_t iov_iter_get_pages(struct iov_iter *i,
 		struct page **pages, size_t maxsize, unsigned maxpages,
 		size_t *start)
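
iov_iter_gap_alignment() folds the old pairwise walk into the iterate_all_kinds() framework: it ORs together the start address of every segment after the first and the remaining byte count at every segment except the last, so the caller can mask the accumulated bits against a queue limit in one step. A user-space model of the iovec leg, assuming a plain iovec array (a sketch only; the real function walks bvec and kvec iterators the same way):

/*
 * User-space model of the iovec case of iov_iter_gap_alignment()
 * added above.  Illustrative only; sample values are invented.
 */
#include <stdio.h>
#include <sys/uio.h>

static unsigned long gap_alignment(const struct iovec *iov, int nr,
				   size_t count)
{
	unsigned long res = 0;
	size_t size = count;	/* bytes still to cover, like i->count */

	for (int k = 0; k < nr && size; k++) {
		size_t len = iov[k].iov_len < size ? iov[k].iov_len : size;

		/*
		 * Mirror the kernel expression: OR in the segment start
		 * once a previous segment has been seen (res != 0), and
		 * the remaining count whenever this segment is not the
		 * final one.
		 */
		res |= (!res ? 0 : (unsigned long)iov[k].iov_base) |
		       (size != len ? size : 0);
		size -= len;
	}
	return res;
}

int main(void)
{
	/* Second segment starts 0x800 into a 4K page; masking with a
	 * hypothetical virt boundary of 0xfff exposes that junction. */
	struct iovec iov[2] = { { (void *)0x10000, 0x1000 },
				{ (void *)0x20800, 0x1000 } };

	printf("gap bits: %#lx\n", gap_alignment(iov, 2, 0x2000) & 0xfff);
	return 0;	/* prints 0x800 */
}

With this helper in place, the gap test in blk_rq_map_user_iov() above reduces to a single expression, copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter).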