| author | Al Viro <viro@ZenIV.linux.org.uk> | 2016-09-20 15:07:42 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-09-20 19:44:28 -0400 |
| commit | e23d4159b109167126e5bcd7f3775c95de7fee47 | |
| tree | 15a9480da60c53b2754ed8aa6cf57dab92e9df65 /include/linux/pagemap.h | |
| parent | df04abfd181acc276ba6762c8206891ae10ae00d | |
fix fault_in_multipages_...() on architectures with no-op access_ok()
Switching the iov_iter fault-in to the multipages variants has exposed an
old bug in the underlying fault_in_multipages_...() helpers: they break if
the range passed to them wraps around. Normally the access_ok() done by
callers will prevent such ranges (and they are a guaranteed EFAULT -
ERR_PTR() values fall into such a range and should not point to any valid
objects).
However, on architectures where userland and kernel live in different
MMU contexts (e.g. s390), access_ok() is a no-op, and on those a range
with a wraparound can reach fault_in_multipages_...().
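
For illustration only, here is a minimal stand-alone user-space sketch (not part of the patch; the example address is hypothetical, and addresses are modelled as `uintptr_t` so the wraparound is well defined) of the arithmetic that trips up the old loops: `end` wraps below `uaddr`, so the old `while (uaddr <= end)` body never runs, yet no error is reported.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical bogus "user" address near the top of the address
	 * space (the ERR_PTR() range lives up there) plus a normal size. */
	uintptr_t uaddr = UINTPTR_MAX - 100;
	uintptr_t size  = 4096;
	uintptr_t end   = uaddr + size - 1;	/* wraps to a tiny value */

	/* The old "while (uaddr <= end)" condition is false on entry, so
	 * nothing is faulted in, yet no error is returned either. */
	printf("end < uaddr: %s, loop body runs: %s\n",
	       end < uaddr ? "yes" : "no",
	       uaddr <= end ? "yes" : "no");
	return 0;
}
```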
Since any wraparound means EFAULT there, the fix is trivial - turn those

	while (uaddr <= end)
		...

into

	if (unlikely(uaddr > end))
		return -EFAULT;
	do
		...
	while (uaddr <= end);
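
A compressed stand-alone comparison of the two shapes (again a user-space sketch with hypothetical helpers: `touch_page()` stands in for `__put_user()`/`__get_user()` and always succeeds, `SIM_PAGE_SIZE` for PAGE_SIZE, and the trailing spill-into-next-page check is omitted for brevity). On a wrapped range the old shape quietly returns 0, while the new one reports -EFAULT up front.

```c
#include <stdint.h>
#include <stdio.h>

#define SIM_EFAULT	14	/* stand-in for the kernel's EFAULT */
#define SIM_PAGE_SIZE	4096UL

/* Stand-in for __put_user()/__get_user(): pretend every access succeeds,
 * since the point here is only the loop-control difference. */
static int touch_page(uintptr_t addr)
{
	(void)addr;
	return 0;
}

/* Old shape: a wrapped range skips the loop entirely and falls
 * through to "success". */
static int fault_in_old(uintptr_t uaddr, uintptr_t size)
{
	int ret = 0;
	uintptr_t end = uaddr + size - 1;

	if (size == 0)
		return 0;
	while (uaddr <= end) {
		ret = touch_page(uaddr);
		if (ret != 0)
			return ret;
		uaddr += SIM_PAGE_SIZE;
	}
	return ret;
}

/* New shape: the explicit check turns any wraparound into -EFAULT, and
 * the do-while guarantees the first page is touched at least once. */
static int fault_in_new(uintptr_t uaddr, uintptr_t size)
{
	uintptr_t end = uaddr + size - 1;

	if (size == 0)
		return 0;
	if (uaddr > end)
		return -SIM_EFAULT;
	do {
		if (touch_page(uaddr) != 0)
			return -SIM_EFAULT;
		uaddr += SIM_PAGE_SIZE;
	} while (uaddr <= end);
	return 0;
}

int main(void)
{
	uintptr_t uaddr = UINTPTR_MAX - 100;	/* wrapping range, as above */
	uintptr_t size = 4096;

	/* Prints "old: 0, new: -14": the old shape claims success. */
	printf("old: %d, new: %d\n",
	       fault_in_old(uaddr, size), fault_in_new(uaddr, size));
	return 0;
}
```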
Reported-by: Jan Stancek <jstancek@redhat.com>
Tested-by: Jan Stancek <jstancek@redhat.com>
Cc: stable@vger.kernel.org # v3.5+
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--  include/linux/pagemap.h | 38
1 file changed, 19 insertions(+), 19 deletions(-)
```diff
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 66a1260b33de..7e3d53753612 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -571,56 +571,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
  */
 static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
 {
-	int ret = 0;
 	char __user *end = uaddr + size - 1;
 
 	if (unlikely(size == 0))
-		return ret;
+		return 0;
 
+	if (unlikely(uaddr > end))
+		return -EFAULT;
 	/*
 	 * Writing zeroes into userspace here is OK, because we know that if
 	 * the zero gets there, we'll be overwriting it.
 	 */
-	while (uaddr <= end) {
-		ret = __put_user(0, uaddr);
-		if (ret != 0)
-			return ret;
+	do {
+		if (unlikely(__put_user(0, uaddr) != 0))
+			return -EFAULT;
 		uaddr += PAGE_SIZE;
-	}
+	} while (uaddr <= end);
 
 	/* Check whether the range spilled into the next page. */
 	if (((unsigned long)uaddr & PAGE_MASK) ==
 	    ((unsigned long)end & PAGE_MASK))
-		ret = __put_user(0, end);
+		return __put_user(0, end);
 
-	return ret;
+	return 0;
 }
 
 static inline int fault_in_multipages_readable(const char __user *uaddr,
 					       int size)
 {
 	volatile char c;
-	int ret = 0;
 	const char __user *end = uaddr + size - 1;
 
 	if (unlikely(size == 0))
-		return ret;
+		return 0;
 
-	while (uaddr <= end) {
-		ret = __get_user(c, uaddr);
-		if (ret != 0)
-			return ret;
+	if (unlikely(uaddr > end))
+		return -EFAULT;
+
+	do {
+		if (unlikely(__get_user(c, uaddr) != 0))
+			return -EFAULT;
 		uaddr += PAGE_SIZE;
-	}
+	} while (uaddr <= end);
 
 	/* Check whether the range spilled into the next page. */
 	if (((unsigned long)uaddr & PAGE_MASK) ==
 	    ((unsigned long)end & PAGE_MASK)) {
-		ret = __get_user(c, end);
-		(void)c;
+		return __get_user(c, end);
 	}
 
-	return ret;
+	return 0;
 }
 
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
```