author    Linus Torvalds <torvalds@linux-foundation.org>    2017-07-07 23:39:20 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-07-07 23:39:20 -0400
commit    6a37e94009b1a76d415b2759755f5cc7854c4ff6 (patch)
tree      0c070f5bb1ab399460554a27fc6cc91c02e50e64 /lib/iov_iter.c
parent    da029c11e6b12f321f36dac8771e833b65cec962 (diff)
parent    09fc68dc66f7597bdc8898c991609a48f061bed5 (diff)
Merge branch 'uaccess-work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull iov_iter hardening from Al Viro:
 "This is the iov_iter/uaccess/hardening pile. For one thing, it trims
  the inline part of copy_to_user/copy_from_user to the minimum that
  *does* need to be inlined - object size checks, basically. For
  another, it sanitizes the checks for iov_iter primitives. There are 4
  groups of checks: access_ok(), might_fault(), object size and KASAN.

   - access_ok() had been verified by whoever had set the iov_iter up.
     However, that has happened in a function far away, so proving that
     there's no path to actual copying bypassing those checks is hard
     and proving that iov_iter has not been buggered in the meanwhile
     is also not pleasant. So we want those redone in actual
     copyin/copyout.

   - might_fault() is better off consolidated - we know whether it
     needs to be checked as soon as we enter iov_iter primitive and
     observe the iov_iter flavour. No need to wait until the
     copyin/copyout. The call chains are short enough to make sure we
     won't miss anything - in fact, it's more robust that way, since
     there are cases where we do e.g. forced fault-in before getting to
     copyin/copyout. It's not quite what we need to check (in
     particular, combination of iovec-backed and set_fs(KERNEL_DS) is
     almost certainly a bug, not a cause to skip checks), but that's
     for later series. For now let's keep might_fault().

   - KASAN checks belong in copyin/copyout - at the same level where
     other iov_iter flavours would've hit them in memcpy().

   - object size checks should apply to *all* iov_iter flavours, not
     just iovec-backed ones.

     There are two groups of primitives - one gets the kernel object
     described as pointer + size (copy_to_iter(), etc.) while another
     gets it as page + offset + size (copy_page_to_iter(), etc.)

     For the first group the checks are best done where we actually
     have a chance to find the object size. In other words, those
     belong in inline wrappers in uio.h, before calling into
     iov_iter.c. Same kind as we have for inlined part of
     copy_to_user().

     For the second group there is no object to look at - offset in
     page is just a number, it bears no type information. So we do them
     in the common helper called by iov_iter.c primitives of that kind.
     All it currently does is checking that we are not trying to access
     outside of the compound page; eventually we might want to add some
     sanity checks on the page involved.

  So the things we need in copyin/copyout part of iov_iter.c do not
  quite match anything in uaccess.h (we want no zeroing, we *do* want
  access_ok() and KASAN and we want no might_fault() or object size
  checks done on that level). OTOH, these needs are simple enough to
  provide a couple of helpers (static in iov_iter.c) doing just what we
  need..."

* 'uaccess-work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  iov_iter: saner checks on copyin/copyout
  iov_iter: sanity checks for copy to/from page primitives
  iov_iter/hardening: move object size checks to inlined part
  copy_{to,from}_user(): consolidate object size checks
  copy_{from,to}_user(): move kasan checks and might_fault() out-of-line
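To illustrate the inline/out-of-line split described above: the uio.h side is not part of this diff (the diffstat below is limited to lib/iov_iter.c), but the inline wrapper could look roughly like the sketch that follows, with only the object-size check left inline and everything else done by the exported _copy_to_iter(). The helper used for the size check here (check_object_size()) and the annotations are assumptions for illustration, not taken from this series.

    /*
     * Rough sketch only - helper names and details are assumptions;
     * the real wrapper lives in include/linux/uio.h after this series.
     */
    static __always_inline
    size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
    {
            /* inline part: the kernel object's size is still visible here */
            check_object_size(addr, bytes, true);
            /* out-of-line part: access_ok() and KASAN happen in copyout() */
            return _copy_to_iter(addr, bytes, i);
    }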
Diffstat (limited to 'lib/iov_iter.c')
-rw-r--r--    lib/iov_iter.c    98
1 file changed, 69 insertions, 29 deletions
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index c9a69064462f..52c8dd6d8e82 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -130,6 +130,24 @@
         }                                               \
 }
 
+static int copyout(void __user *to, const void *from, size_t n)
+{
+        if (access_ok(VERIFY_WRITE, to, n)) {
+                kasan_check_read(from, n);
+                n = raw_copy_to_user(to, from, n);
+        }
+        return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+        if (access_ok(VERIFY_READ, from, n)) {
+                kasan_check_write(to, n);
+                n = raw_copy_from_user(to, from, n);
+        }
+        return n;
+}
+
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                          struct iov_iter *i)
 {
@@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
         if (unlikely(!bytes))
                 return 0;
 
+        might_fault();
         wanted = bytes;
         iov = i->iov;
         skip = i->iov_offset;
@@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
         from = kaddr + offset;
 
         /* first chunk, usually the only one */
-        left = __copy_to_user_inatomic(buf, from, copy);
+        left = copyout(buf, from, copy);
         copy -= left;
         skip += copy;
         from += copy;
@@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                 iov++;
                 buf = iov->iov_base;
                 copy = min(bytes, iov->iov_len);
-                left = __copy_to_user_inatomic(buf, from, copy);
+                left = copyout(buf, from, copy);
                 copy -= left;
                 skip = copy;
                 from += copy;
@@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 
         kaddr = kmap(page);
         from = kaddr + offset;
-        left = __copy_to_user(buf, from, copy);
+        left = copyout(buf, from, copy);
         copy -= left;
         skip += copy;
         from += copy;
@@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                 iov++;
                 buf = iov->iov_base;
                 copy = min(bytes, iov->iov_len);
-                left = __copy_to_user(buf, from, copy);
+                left = copyout(buf, from, copy);
                 copy -= left;
                 skip = copy;
                 from += copy;
@@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
         if (unlikely(!bytes))
                 return 0;
 
+        might_fault();
         wanted = bytes;
         iov = i->iov;
         skip = i->iov_offset;
@@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
         to = kaddr + offset;
 
         /* first chunk, usually the only one */
-        left = __copy_from_user_inatomic(to, buf, copy);
+        left = copyin(to, buf, copy);
         copy -= left;
         skip += copy;
         to += copy;
@@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                 iov++;
                 buf = iov->iov_base;
                 copy = min(bytes, iov->iov_len);
-                left = __copy_from_user_inatomic(to, buf, copy);
+                left = copyin(to, buf, copy);
                 copy -= left;
                 skip = copy;
                 to += copy;
@@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 
         kaddr = kmap(page);
         to = kaddr + offset;
-        left = __copy_from_user(to, buf, copy);
+        left = copyin(to, buf, copy);
         copy -= left;
         skip += copy;
         to += copy;
@@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                 iov++;
                 buf = iov->iov_base;
                 copy = min(bytes, iov->iov_len);
-                left = __copy_from_user(to, buf, copy);
+                left = copyin(to, buf, copy);
                 copy -= left;
                 skip = copy;
                 to += copy;
@@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
         return bytes;
 }
 
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
         const char *from = addr;
         if (unlikely(i->type & ITER_PIPE))
                 return copy_pipe_to_iter(addr, bytes, i);
+        if (iter_is_iovec(i))
+                might_fault();
         iterate_and_advance(i, bytes, v,
-                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
-                               v.iov_len),
+                copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                 memcpy_to_page(v.bv_page, v.bv_offset,
                                (from += v.bv_len) - v.bv_len, v.bv_len),
                 memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
@@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 
         return bytes;
 }
-EXPORT_SYMBOL(copy_to_iter);
+EXPORT_SYMBOL(_copy_to_iter);
 
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
                 WARN_ON(1);
                 return 0;
         }
+        if (iter_is_iovec(i))
+                might_fault();
         iterate_and_advance(i, bytes, v,
-                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
-                                 v.iov_len),
+                copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                  v.bv_offset, v.bv_len),
                 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
@@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 
         return bytes;
 }
-EXPORT_SYMBOL(copy_from_iter);
+EXPORT_SYMBOL(_copy_from_iter);
 
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
@@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
         if (unlikely(i->count < bytes))
                 return false;
 
+        if (iter_is_iovec(i))
+                might_fault();
         iterate_all_kinds(i, bytes, v, ({
-                if (__copy_from_user((to += v.iov_len) - v.iov_len,
+                if (copyin((to += v.iov_len) - v.iov_len,
                                       v.iov_base, v.iov_len))
                         return false;
                 0;}),
@@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
         iov_iter_advance(i, bytes);
         return true;
 }
-EXPORT_SYMBOL(copy_from_iter_full);
+EXPORT_SYMBOL(_copy_from_iter_full);
 
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
@@ -613,10 +637,10 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 
         return bytes;
 }
-EXPORT_SYMBOL(copy_from_iter_nocache);
+EXPORT_SYMBOL(_copy_from_iter_nocache);
 
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
@@ -634,10 +658,10 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 
         return bytes;
 }
-EXPORT_SYMBOL_GPL(copy_from_iter_flushcache);
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
 #endif
 
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
@@ -659,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
         iov_iter_advance(i, bytes);
         return true;
 }
-EXPORT_SYMBOL(copy_from_iter_full_nocache);
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+        size_t v = n + offset;
+        if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+                return true;
+        WARN_ON(1);
+        return false;
+}
 
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                          struct iov_iter *i)
 {
+        if (unlikely(!page_copy_sane(page, offset, bytes)))
+                return 0;
         if (i->type & (ITER_BVEC|ITER_KVEC)) {
                 void *kaddr = kmap_atomic(page);
                 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
@@ -679,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                          struct iov_iter *i)
 {
+        if (unlikely(!page_copy_sane(page, offset, bytes)))
+                return 0;
         if (unlikely(i->type & ITER_PIPE)) {
                 WARN_ON(1);
                 return 0;
         }
         if (i->type & (ITER_BVEC|ITER_KVEC)) {
                 void *kaddr = kmap_atomic(page);
-                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                 kunmap_atomic(kaddr);
                 return wanted;
         } else
@@ -722,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
         if (unlikely(i->type & ITER_PIPE))
                 return pipe_zero(bytes, i);
         iterate_and_advance(i, bytes, v,
-                __clear_user(v.iov_base, v.iov_len),
+                clear_user(v.iov_base, v.iov_len),
                 memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                 memset(v.iov_base, 0, v.iov_len)
         )
@@ -735,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                                   struct iov_iter *i, unsigned long offset, size_t bytes)
 {
         char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+        if (unlikely(!page_copy_sane(page, offset, bytes))) {
+                kunmap_atomic(kaddr);
+                return 0;
+        }
         if (unlikely(i->type & ITER_PIPE)) {
                 kunmap_atomic(kaddr);
                 WARN_ON(1);
                 return 0;
         }
         iterate_all_kinds(i, bytes, v,
-                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
-                                          v.iov_base, v.iov_len),
+                copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                  v.bv_offset, v.bv_len),
                 memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)