author     Mel Gorman <mgorman@suse.de>    2012-10-25 08:16:32 -0400
committer  Mel Gorman <mgorman@suse.de>    2012-12-11 09:42:44 -0500
commit     4b10e7d562c90d0a72f324832c26653947a07381 (patch)
tree       733e0fc3ac3fdfe27a312bc72e4ffb07bbf0aa56 /mm/mprotect.c
parent     b24f53a0bea38b266d219ee651b22dba727c44ae (diff)
mm: mempolicy: Implement change_prot_numa() in terms of change_protection()
This patch converts change_prot_numa() to use change_protection(). As pte_numa and friends check the PTE bits directly, it is necessary for change_protection() to use pmd_mknuma(). Hence the required modifications to change_protection() are a little clumsy, but the end result is that most of the numa page table helpers are just one or two instructions.

Signed-off-by: Mel Gorman <mgorman@suse.de>
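For context, the caller side of this conversion lives in mm/mempolicy.c and is not part of the diffstat below. A minimal sketch of the resulting shape of change_prot_numa() — illustrative only, assuming the new prot_numa argument introduced in this patch, not the exact hunk from the series:

/*
 * Sketch: instead of walking the page tables itself, change_prot_numa()
 * asks change_protection() to mark the range pte_numa/pmd_numa by passing
 * prot_numa == 1.  The vma's existing protection is passed through
 * unchanged and the number of updated entries is returned.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			       unsigned long addr, unsigned long end)
{
	int nr_updated;

	/* dirty_accountable == 0, prot_numa == 1 */
	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);

	return nr_updated;
}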
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--  mm/mprotect.c | 72
1 file changed, 56 insertions(+), 16 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7c3628a8b486..7ef6ae964e8f 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -35,10 +35,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 }
 #endif
 
-static unsigned long change_pte_range(struct mm_struct *mm, pmd_t *pmd,
+static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable)
+		int dirty_accountable, int prot_numa)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
@@ -49,19 +50,39 @@ static unsigned long change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		oldpte = *pte;
 		if (pte_present(oldpte)) {
 			pte_t ptent;
+			bool updated = false;
 
 			ptent = ptep_modify_prot_start(mm, addr, pte);
-			ptent = pte_modify(ptent, newprot);
+			if (!prot_numa) {
+				ptent = pte_modify(ptent, newprot);
+				updated = true;
+			} else {
+				struct page *page;
+
+				page = vm_normal_page(vma, addr, oldpte);
+				if (page) {
+					/* only check non-shared pages */
+					if (!pte_numa(oldpte) &&
+					    page_mapcount(page) == 1) {
+						ptent = pte_mknuma(ptent);
+						updated = true;
+					}
+				}
+			}
 
 			/*
 			 * Avoid taking write faults for pages we know to be
 			 * dirty.
 			 */
-			if (dirty_accountable && pte_dirty(ptent))
+			if (dirty_accountable && pte_dirty(ptent)) {
 				ptent = pte_mkwrite(ptent);
+				updated = true;
+			}
+
+			if (updated)
+				pages++;
 
 			ptep_modify_prot_commit(mm, addr, pte, ptent);
-			pages++;
 		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
@@ -83,9 +104,25 @@ static unsigned long change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	return pages;
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+				       pmd_t *pmd)
+{
+	spin_lock(&mm->page_table_lock);
+	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
+	spin_unlock(&mm->page_table_lock);
+}
+#else
+static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+				       pmd_t *pmd)
+{
+	BUG();
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable)
+		int dirty_accountable, int prot_numa)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -97,7 +134,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
 				split_huge_page_pmd(vma->vm_mm, pmd);
-			else if (change_huge_pmd(vma, pmd, addr, newprot)) {
+			else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) {
 				pages += HPAGE_PMD_NR;
 				continue;
 			}
@@ -105,8 +142,11 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
 		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		pages += change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
-				 dirty_accountable);
+		pages += change_pte_range(vma, pmd, addr, next, newprot,
+				 dirty_accountable, prot_numa);
+
+		if (prot_numa)
+			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
 
 	return pages;
@@ -114,7 +154,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
 
 static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable)
+		int dirty_accountable, int prot_numa)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -126,7 +166,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *
 		if (pud_none_or_clear_bad(pud))
 			continue;
 		pages += change_pmd_range(vma, pud, addr, next, newprot,
-				 dirty_accountable);
+				 dirty_accountable, prot_numa);
 	} while (pud++, addr = next, addr != end);
 
 	return pages;
@@ -134,7 +174,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *
 
 static unsigned long change_protection_range(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable)
+		int dirty_accountable, int prot_numa)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -150,7 +190,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
 		pages += change_pud_range(vma, pgd, addr, next, newprot,
-				 dirty_accountable);
+				 dirty_accountable, prot_numa);
 	} while (pgd++, addr = next, addr != end);
 
 	/* Only flush the TLB if we actually modified any entries: */
@@ -162,7 +202,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 
 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
 		       unsigned long end, pgprot_t newprot,
-		       int dirty_accountable)
+		       int dirty_accountable, int prot_numa)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long pages;
@@ -171,7 +211,7 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
 	if (is_vm_hugetlb_page(vma))
 		pages = hugetlb_change_protection(vma, start, end, newprot);
 	else
-		pages = change_protection_range(vma, start, end, newprot, dirty_accountable);
+		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 
 	return pages;
@@ -249,7 +289,7 @@ success:
 		dirty_accountable = 1;
 	}
 
-	change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
+	change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0);
 
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
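The commit message's claim that "most of the numa page table helpers are just one or two instructions" refers to helpers such as pte_numa() and pte_mknuma() used in the change_pte_range() hunk above. An illustrative sketch of that shape, assuming the series' scheme of setting a spare PTE bit (_PAGE_NUMA) while clearing _PAGE_PRESENT — this is not the exact code from the series:

/*
 * Illustrative only: with _PAGE_NUMA aliased onto a spare bit and the
 * entry made non-present, the NUMA helpers reduce to a few bit operations,
 * which is why change_protection() can test and set them directly.
 */
static inline int pte_numa(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_NUMA | _PAGE_PRESENT)) == _PAGE_NUMA;
}

static inline pte_t pte_mknuma(pte_t pte)
{
	return __pte((pte_val(pte) | _PAGE_NUMA) & ~_PAGE_PRESENT);
}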