author	Johannes Weiner <hannes@cmpxchg.org>	2011-01-13 18:47:03 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:44 -0500
commit	b36f5b0710e9e3b92484de32920fddcb17278664 (patch)
tree	823f5ee3ff0e18c67489bd8e525d83f3f7cf1b1a /mm/mprotect.c
parent	c489f1257b8cacd4881a18da1e93659f934a8e98 (diff)
thp: mprotect: pass vma down to page table walkers
Flushing the TLB for huge pmds requires the vma's anon_vma, so pass along
the vma instead of the mm; we can always get the latter when we need it.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--	mm/mprotect.c	15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index bd27db6b992b..9402080d0d4a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -78,7 +78,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	pte_unmap_unlock(pte - 1, ptl);
 }
 
-static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
+static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
 {
@@ -88,14 +88,15 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(mm, pmd);
+		split_huge_page_pmd(vma->vm_mm, pmd);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
+		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
+				 dirty_accountable);
 	} while (pmd++, addr = next, addr != end);
 }
 
-static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
 {
@@ -107,7 +108,8 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
+		change_pmd_range(vma, pud, addr, next, newprot,
+				 dirty_accountable);
 	} while (pud++, addr = next, addr != end);
 }
 
@@ -127,7 +129,8 @@ static void change_protection(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
+		change_pud_range(vma, pgd, addr, next, newprot,
+				 dirty_accountable);
 	} while (pgd++, addr = next, addr != end);
 	flush_tlb_range(vma, start, end);
 }
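
To make the pattern in the diff above concrete outside the kernel, here is a
minimal userspace sketch. The struct definitions are reduced stand-ins, not
the real mm_struct and vm_area_struct, and the walker functions are
hypothetical; only the vma->vm_mm back-pointer relationship is taken from the
kernel. The point it illustrates: once a walker receives the vma rather than
the mm, the mm is still one dereference away, while vma-only state (such as
anon_vma, needed to flush the TLB for huge pmds) becomes reachable too.

/*
 * Illustrative sketch only -- userspace stand-ins, not the real kernel
 * definitions of mm_struct and vm_area_struct.
 */
#include <stdio.h>

struct mm_struct { int id; };

struct vm_area_struct {
	struct mm_struct *vm_mm;	/* every vma points back at its mm */
};

/* Before the patch: the walker only ever saw the mm. */
static void walk_with_mm(struct mm_struct *mm)
{
	printf("walker sees mm %d, but no vma state\n", mm->id);
}

/*
 * After the patch: the walker takes the vma and derives the mm on
 * demand via vma->vm_mm, so vma-only state is now in reach at this
 * depth of the walk.
 */
static void walk_with_vma(struct vm_area_struct *vma)
{
	printf("walker sees mm %d via vma->vm_mm\n", vma->vm_mm->id);
}

int main(void)
{
	struct mm_struct mm = { .id = 1 };
	struct vm_area_struct vma = { .vm_mm = &mm };

	walk_with_mm(&mm);	/* old calling convention */
	walk_with_vma(&vma);	/* new calling convention */
	return 0;
}

The same trade-off drives the commit: widening the parameter from mm to vma
loses nothing, because the mm is always recoverable from the vma, but the
reverse derivation does not exist.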