diff options
author    Hugh Dickins <hugh@veritas.com>    2005-10-29 21:16:27 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2005-10-30 00:40:40 -0400
commit    705e87c0c3c38424f7f30556c85bc20e808d2f59 (patch)
tree      7a237e6266f4801385e1226cc497b47e3a2458bd /mm/mprotect.c
parent    8f4e2101fd7df9031a754eedb82e2060b51f8c45 (diff)
[PATCH] mm: pte_offset_map_lock loops
Convert those common loops using page_table_lock on the outside and
pte_offset_map within to use just pte_offset_map_lock within instead.
These all hold mmap_sem (some exclusively, some not), so at no level can a
page table be whipped away from beneath them. But whereas pte_alloc loops
tested with the "atomic" pmd_present, these loops are testing with pmd_none,
which on i386 PAE tests both lower and upper halves.
That's now unsafe, so add a cast into pmd_none to test only the vital lower
half: we lose a little sensitivity to a corrupt middle directory, but not
enough to worry about. It appears that i386 and UML were the only
architectures vulnerable in this way, and pgd and pud no problem.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--  mm/mprotect.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 672a76fddd5..17a2b52b753 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -29,8 +29,9 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
 	pte_t *pte;
+	spinlock_t *ptl;
 
-	pte = pte_offset_map(pmd, addr);
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	do {
 		if (pte_present(*pte)) {
 			pte_t ptent;
@@ -44,7 +45,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 			lazy_mmu_prot_update(ptent);
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(pte - 1);
+	pte_unmap_unlock(pte - 1, ptl);
 }
 
 static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -88,7 +89,6 @@ static void change_protection(struct vm_area_struct *vma,
 	BUG_ON(addr >= end);
 	pgd = pgd_offset(mm, addr);
 	flush_cache_range(vma, addr, end);
-	spin_lock(&mm->page_table_lock);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
@@ -96,7 +96,6 @@ static void change_protection(struct vm_area_struct *vma,
 		change_pud_range(mm, pgd, addr, next, newprot);
 	} while (pgd++, addr = next, addr != end);
 	flush_tlb_range(vma, start, end);
-	spin_unlock(&mm->page_table_lock);
 }
 
 static int