author		Al Viro <viro@zeniv.linux.org.uk>	2017-06-29 22:25:14 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2017-07-07 05:18:09 -0400
commit		09fc68dc66f7597bdc8898c991609a48f061bed5 (patch)
tree		ae0af5ee4b37d0133ed57c0c6f5e625430221ff5 /lib
parent		72e809ed81edf81b93d3a36b7238ba50d67f043d (diff)
iov_iter: saner checks on copyin/copyout
* might_fault() is better checked in the caller (and e.g. the fault-in +
  kmap_atomic codepath also needs might_fault() coverage)
* we have already done object size checks
* we have *NOT* done access_ok() recently enough; we rely upon the iovec
  array having passed sanity checks back when it had been created and
  nothing having buggered it since. However, that's very much non-local,
  so we'd better recheck.

So the thing we want does not match anything in uaccess - we need
access_ok + kasan checks + raw copy without any zeroing. Just define
such helpers and use them here.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
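As a rough sketch of the helper shape described above - a non-faulting range check first, a KASAN annotation second, then a raw copy that reports uncopied bytes and never zero-pads the destination - consider this userspace model. Every name in it (range_ok, asan_note_read, raw_copy, USER_SPAN) is a stand-in invented for illustration; only the overall copyout shape mirrors the helper the patch adds below.

    #include <stddef.h>
    #include <string.h>

    #define USER_SPAN ((size_t)1 << 40)  /* stand-in for the task's address limit */

    /* Models access_ok(): a pure pointer/range check that cannot fault. */
    static int range_ok(const void *p, size_t n)
    {
        return p && n <= USER_SPAN;
    }

    /* Models kasan_check_read(): lets the sanitizer validate the kernel-side
     * source object before the raw copy reads it; a no-op in this sketch. */
    static void asan_note_read(const void *p, size_t n) { (void)p; (void)n; }

    /* Models raw_copy_to_user(): returns the number of bytes NOT copied
     * and, unlike copy_to_user(), does no zeroing of the destination. */
    static size_t raw_copy(void *to, const void *from, size_t n)
    {
        memcpy(to, from, n);
        return 0;
    }

    /* The shape the patch introduces: check + instrument + raw copy. */
    static size_t copyout(void *to, const void *from, size_t n)
    {
        if (range_ok(to, n)) {
            asan_note_read(from, n);
            n = raw_copy(to, from, n);
        }
        return n;  /* left untouched (== n) if the range check fails */
    }

    int main(void)
    {
        char src[8] = "abcdefg", dst[8];
        return (int)copyout(dst, src, sizeof(src));  /* 0 => fully copied */
    }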
Diffstat (limited to 'lib')
-rw-r--r--	lib/iov_iter.c	55
1 file changed, 39 insertions(+), 16 deletions(-)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b50a478f9d34..b3b2ee8a20b5 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -130,6 +130,24 @@
 	}					\
 }
 
+static int copyout(void __user *to, const void *from, size_t n)
+{
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		kasan_check_read(from, n);
+		n = raw_copy_to_user(to, from, n);
+	}
+	return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+	if (access_ok(VERIFY_READ, from, n)) {
+		kasan_check_write(to, n);
+		n = raw_copy_from_user(to, from, n);
+	}
+	return n;
+}
+
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
@@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	if (unlikely(!bytes))
 		return 0;
 
+	might_fault();
 	wanted = bytes;
 	iov = i->iov;
 	skip = i->iov_offset;
@@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	from = kaddr + offset;
 
 	/* first chunk, usually the only one */
-	left = __copy_to_user_inatomic(buf, from, copy);
+	left = copyout(buf, from, copy);
 	copy -= left;
 	skip += copy;
 	from += copy;
@@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user_inatomic(buf, from, copy);
+		left = copyout(buf, from, copy);
 		copy -= left;
 		skip = copy;
 		from += copy;
@@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 
 	kaddr = kmap(page);
 	from = kaddr + offset;
-	left = __copy_to_user(buf, from, copy);
+	left = copyout(buf, from, copy);
 	copy -= left;
 	skip += copy;
 	from += copy;
@@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user(buf, from, copy);
+		left = copyout(buf, from, copy);
 		copy -= left;
 		skip = copy;
 		from += copy;
@@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	if (unlikely(!bytes))
 		return 0;
 
+	might_fault();
 	wanted = bytes;
 	iov = i->iov;
 	skip = i->iov_offset;
@@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	to = kaddr + offset;
 
 	/* first chunk, usually the only one */
-	left = __copy_from_user_inatomic(to, buf, copy);
+	left = copyin(to, buf, copy);
 	copy -= left;
 	skip += copy;
 	to += copy;
@@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_from_user_inatomic(to, buf, copy);
+		left = copyin(to, buf, copy);
 		copy -= left;
 		skip = copy;
 		to += copy;
@@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 
 	kaddr = kmap(page);
 	to = kaddr + offset;
-	left = __copy_from_user(to, buf, copy);
+	left = copyin(to, buf, copy);
 	copy -= left;
 	skip += copy;
 	to += copy;
@@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_from_user(to, buf, copy);
+		left = copyin(to, buf, copy);
 		copy -= left;
 		skip = copy;
 		to += copy;
@@ -540,9 +560,10 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 	const char *from = addr;
 	if (unlikely(i->type & ITER_PIPE))
 		return copy_pipe_to_iter(addr, bytes, i);
+	if (iter_is_iovec(i))
+		might_fault();
 	iterate_and_advance(i, bytes, v,
-		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
-			       v.iov_len),
+		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
 		memcpy_to_page(v.bv_page, v.bv_offset,
 			       (from += v.bv_len) - v.bv_len, v.bv_len),
 		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
@@ -559,9 +580,10 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 		WARN_ON(1);
 		return 0;
 	}
+	if (iter_is_iovec(i))
+		might_fault();
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
-				 v.iov_len),
+		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
@@ -581,8 +603,10 @@ bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 
+	if (iter_is_iovec(i))
+		might_fault();
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user((to += v.iov_len) - v.iov_len,
-				     v.iov_base, v.iov_len))
+		if (copyin((to += v.iov_len) - v.iov_len,
+			   v.iov_base, v.iov_len))
 			return false;
 		0;}),
@@ -713,7 +737,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 	if (unlikely(i->type & ITER_PIPE))
 		return pipe_zero(bytes, i);
 	iterate_and_advance(i, bytes, v,
-		__clear_user(v.iov_base, v.iov_len),
+		clear_user(v.iov_base, v.iov_len),
 		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
 		memset(v.iov_base, 0, v.iov_len)
 	)
@@ -736,8 +760,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		return 0;
 	}
 	iterate_all_kinds(i, bytes, v,
-		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
-					  v.iov_base, v.iov_len),
+		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
 		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
 		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
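All of the call sites converted above share one convention: the copy primitive returns how many bytes it failed to transfer, and the caller advances only by what actually landed (left = copyout(...); copy -= left;). Below is a minimal userspace sketch of that accounting; fake_copyout is an invented stand-in that simulates a fault part-way through, not the kernel helper itself.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented stand-in for copyout(): copies all but 'short_by' trailing
     * bytes to simulate a fault, and returns the bytes left uncopied. */
    static size_t fake_copyout(void *to, const void *from, size_t n, size_t short_by)
    {
        size_t done = n > short_by ? n - short_by : 0;
        memcpy(to, from, done);
        return n - done;
    }

    int main(void)
    {
        char src[16] = "page contents..";
        char dst[16] = { 0 };
        size_t wanted = sizeof(src);

        /* Same arithmetic as the first-chunk path in copy_page_to_iter_iovec():
         * advance by 'copy' only after subtracting what was NOT copied. */
        size_t copy = wanted;
        size_t left = fake_copyout(dst, src, copy, 6);  /* pretend 6 bytes fault */
        copy -= left;

        printf("wanted %zu, copied %zu, left %zu\n", wanted, copy, left);
        /* the short copy is reported upward; no zero-fill papers over it */
        return 0;
    }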