author		Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>	2009-10-26 19:50:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-10-29 10:39:32 -0400
commit		c36987e2ef32e1bb7850379515f21187cba44754 (patch)
tree		0b0a6b6a54c2a80de86426a74367ec4b1f089b61
parent		2545f038f4af0ff9945d47c10f988418dda50140 (diff)
mm: don't call pte_unmap() against an improper pte
There are some places where we do something like:

	pte = pte_map();
	do {
		(break out of the loop under some conditions)
	} while (pte++, ...);
	pte_unmap(pte - 1);

But if the loop breaks on the first iteration, pte_unmap() unmaps an
invalid pte.

This patch fixes this problem.
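To illustrate, here is a minimal user-space C sketch of the same
pointer-arithmetic mistake and of the fix; fake_map()/fake_unmap() are
hypothetical stand-ins for the pte mapping helpers, not kernel APIs:

	#include <assert.h>
	#include <stdlib.h>

	/* hypothetical stand-ins for pte_offset_map()/pte_unmap():
	 * "unmap" must receive exactly the pointer "map" returned */
	static int *fake_map(size_t n)            { return calloc(n, sizeof(int)); }
	static void fake_unmap(int *p, int *base) { assert(p == base); free(p); }

	int main(void)
	{
		size_t left = 8;
		int *base = fake_map(left);
		int *p, *orig_p;

		if (!base)
			return 1;
		p = base;
		orig_p = p;		/* the fix: remember where the mapping starts */

		do {
			if (*p == 0)	/* calloc() zero-fills, so this breaks on */
				break;	/* the very first iteration, before p++   */
			*p = 1;
		} while (p++, --left);

		/* fake_unmap(p - 1, base) would trip the assert: p never
		 * advanced, so p - 1 points one element before the mapping */
		fake_unmap(orig_p, base);
		return 0;
	}

When the break fires before p has ever been incremented, p - 1 is not the
pointer the map call returned; saving the original pointer up front, as the
patch does with orig_src_pte and orig_dst_pte, makes the unmap
unconditionally correct.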
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/memory.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 7e91b5f9f690..60ea601e03ea 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -641,6 +641,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end)
 {
+	pte_t *orig_src_pte, *orig_dst_pte;
 	pte_t *src_pte, *dst_pte;
 	spinlock_t *src_ptl, *dst_ptl;
 	int progress = 0;
@@ -654,6 +655,8 @@ again:
 	src_pte = pte_offset_map_nested(src_pmd, addr);
 	src_ptl = pte_lockptr(src_mm, src_pmd);
 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+	orig_src_pte = src_pte;
+	orig_dst_pte = dst_pte;
 	arch_enter_lazy_mmu_mode();
 
 	do {
@@ -677,9 +680,9 @@ again:
 
 	arch_leave_lazy_mmu_mode();
 	spin_unlock(src_ptl);
-	pte_unmap_nested(src_pte - 1);
+	pte_unmap_nested(orig_src_pte);
 	add_mm_rss(dst_mm, rss[0], rss[1]);
-	pte_unmap_unlock(dst_pte - 1, dst_ptl);
+	pte_unmap_unlock(orig_dst_pte, dst_ptl);
 	cond_resched();
 	if (addr != end)
 		goto again;
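In copy_pte_range(), the fix is to remember the pointers returned by the
mapping calls in orig_src_pte and orig_dst_pte immediately after mapping,
and to unmap exactly those, so it no longer matters whether the copy loop
incremented src_pte/dst_pte before breaking out. The final hunk below fixes
apply_to_pte_range() by a different route.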
@@ -1820,10 +1823,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	token = pmd_pgtable(*pmd);
 
 	do {
-		err = fn(pte, token, addr, data);
+		err = fn(pte++, token, addr, data);
 		if (err)
 			break;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
+	} while (addr += PAGE_SIZE, addr != end);
 
 	arch_leave_lazy_mmu_mode();
 
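In apply_to_pte_range(), moving the increment into the fn() call means pte
has always advanced past the last entry processed by the time the loop
exits, so the pte_unmap_unlock(pte - 1, ptl) that follows (outside the hunk
shown) again receives a mapped pte.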