author		Hugh Dickins <hugh@veritas.com>	2006-12-10 05:18:43 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-10 12:55:39 -0500
commit		5fcf7bb73f66cc1c4ad90788b0f367c4d6852b75
tree		76854ba1babc308beaf8f19d299a5b32ab7fda30 /drivers/char/mem.c
parent		347a00fb4ad2200f8f8331f8b366b1d84eff577d
[PATCH] read_zero_pagealigned() locking fix
Ramiro Voicu hits the BUG_ON(!pte_none(*pte)) in zeromap_pte_range: kernel
bugzilla 7645. He's right: read_zero_pagealigned uses down_read of mmap_sem,
but another thread's racing read of /dev/zero, or a normal fault, can easily
set that pte again between zap_page_range and zeromap_page_range getting
there. It has been wrong ever since 2.4.3.
The simple fix is to use down_write instead, but that would serialize reads
of /dev/zero more than at present: perhaps some app would be badly affected.
So instead let zeromap_page_range return an error instead of hitting the
BUG_ON, and have read_zero_pagealigned break to the slower clear_user loop
in that case - there's no need to optimize for such a rare race.
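A condensed sketch of the resulting flow in read_zero_pagealigned (abridged
from the surrounding function, which the diff below shows only in part; only
the lines in the hunk are verbatim from the patch):

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		...
		zap_page_range(vma, addr, count, NULL);
		if (zeromap_page_range(vma, addr, count, PAGE_COPY))
			break;	/* a pte raced back in: abandon the fast path */
		...
	}
	up_read(&mm->mmap_sem);

	/* Conventional zeroing: slower, but immune to the race. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);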
Use -EEXIST for when a pte is found, and BUG_ON that case in mmap_zero (the
other user of zeromap_page_range), where it really cannot arise. And since
mmap_zero wants -EAGAIN for out-of-memory, the zeromap functions had better
return that rather than -ENOMEM.
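The mm/memory.c half of the patch is filtered out of the view below; as a
rough sketch (illustrative, not the verbatim hunk), zeromap_pte_range ends
up doing something like:

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;	/* out of memory: mmap_zero passes this on */
	do {
		if (!pte_none(*pte)) {
			err = -EEXIST;	/* lost the race; was BUG_ON */
			break;
		}
		set_pte_at(mm, addr, pte, zero_pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);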
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Ramiro Voicu <Ramiro.Voicu@cern.ch>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/char/mem.c')
-rw-r--r--	drivers/char/mem.c	12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 089020e0ee5a..4f1813e04754 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -646,7 +646,8 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
 			count = size;
 
 		zap_page_range(vma, addr, count, NULL);
-		zeromap_page_range(vma, addr, count, PAGE_COPY);
+		if (zeromap_page_range(vma, addr, count, PAGE_COPY))
+			break;
 
 		size -= count;
 		buf += count;
@@ -713,11 +714,14 @@ out:
 
 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
 {
+	int err;
+
 	if (vma->vm_flags & VM_SHARED)
 		return shmem_zero_setup(vma);
-	if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
-		return -EAGAIN;
-	return 0;
+	err = zeromap_page_range(vma, vma->vm_start,
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	BUG_ON(err == -EEXIST);
+	return err;
 }
 #else /* CONFIG_MMU */
 static ssize_t read_zero(struct file * file, char * buf,