author		Mel Gorman <mgorman@suse.de>		2015-02-12 17:58:22 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-12 21:54:08 -0500
commit		8a0516ed8b90c95ffa1363b420caa37418149f21
tree		64f95968661a136fa8a246419680420819dc1e0e /mm
parent		e7bb4b6d1609cce391af1e7bc6f31d14f1a3a890
mm: convert p[te|md]_numa users to p[te|md]_protnone_numa
Convert existing users of pte_numa and friends to the new helper. Note
that the kernel is broken after this patch is applied until the other
page table modifiers are also altered. This patch layout is to make
review easier.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
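The p[te|md]_protnone helpers used throughout this diff are not defined here;
they come from the parent commit (e7bb4b6d1609). As a rough, illustrative
sketch (assuming the x86 definitions follow the usual _PAGE_PROTNONE and
_PAGE_PRESENT convention), the check that the converted call sites now rely
on looks approximately like this:

/*
 * Illustrative sketch, not part of this patch: approximate x86-style
 * p[te|md]_protnone helpers as assumed to be added by the parent commit.
 * A NUMA hinting entry has _PAGE_PROTNONE set while _PAGE_PRESENT is
 * clear, so the "protnone" test doubles as the NUMA hinting test.
 */
static inline int pte_protnone(pte_t pte)
{
        return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
                == _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
                == _PAGE_PROTNONE;
}

With those helpers in place, the conversion below is mechanical: each
pte_numa()/pmd_numa() test becomes a pte_protnone()/pmd_protnone() test,
while the corresponding page table modifiers are updated by later patches
in the series (hence the note above about the kernel being transiently
broken).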
Diffstat (limited to 'mm')
-rw-r--r--	mm/gup.c		10
-rw-r--r--	mm/huge_memory.c	16
-rw-r--r--	mm/memory.c		4
-rw-r--r--	mm/mprotect.c		38
-rw-r--r--	mm/pgtable-generic.c	2
5 files changed, 26 insertions(+), 44 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index c2da1163986a..51bf0b06ca7b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -64,7 +64,7 @@ retry:
                 migration_entry_wait(mm, pmd, address);
                 goto retry;
         }
-        if ((flags & FOLL_NUMA) && pte_numa(pte))
+        if ((flags & FOLL_NUMA) && pte_protnone(pte))
                 goto no_page;
         if ((flags & FOLL_WRITE) && !pte_write(pte)) {
                 pte_unmap_unlock(ptep, ptl);
@@ -184,7 +184,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
                         return page;
                 return no_page_table(vma, flags);
         }
-        if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+        if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
                 return no_page_table(vma, flags);
         if (pmd_trans_huge(*pmd)) {
                 if (flags & FOLL_SPLIT) {
@@ -906,10 +906,10 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
                 /*
                  * Similar to the PMD case below, NUMA hinting must take slow
-                 * path
+                 * path using the pte_protnone check.
                  */
                 if (!pte_present(pte) || pte_special(pte) ||
-                    pte_numa(pte) || (write && !pte_write(pte)))
+                    pte_protnone(pte) || (write && !pte_write(pte)))
                         goto pte_unmap;
 
                 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -1104,7 +1104,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                         * slowpath for accounting purposes and so that they
                         * can be serialised against THP migration.
                         */
-                        if (pmd_numa(pmd))
+                        if (pmd_protnone(pmd))
                                 return 0;
 
                         if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c6921362c5fc..915941c45169 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1211,7 +1211,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                 return ERR_PTR(-EFAULT);
 
         /* Full NUMA hinting faults to serialise migration in fault paths */
-        if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+        if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
                 goto out;
 
         page = pmd_page(*pmd);
@@ -1342,7 +1342,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
         /*
          * Migrate the THP to the requested node, returns with page unlocked
-         * and pmd_numa cleared.
+         * and access rights restored.
          */
         spin_unlock(ptl);
         migrated = migrate_misplaced_transhuge_page(mm, vma,
@@ -1357,7 +1357,7 @@ clear_pmdnuma:
         BUG_ON(!PageLocked(page));
         pmd = pmd_mknonnuma(pmd);
         set_pmd_at(mm, haddr, pmdp, pmd);
-        VM_BUG_ON(pmd_numa(*pmdp));
+        VM_BUG_ON(pmd_protnone(*pmdp));
         update_mmu_cache_pmd(vma, addr, pmdp);
         unlock_page(page);
 out_unlock:
@@ -1483,7 +1483,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                 ret = 1;
                 if (!prot_numa) {
                         entry = pmdp_get_and_clear_notify(mm, addr, pmd);
-                        if (pmd_numa(entry))
+                        if (pmd_protnone(entry))
                                 entry = pmd_mknonnuma(entry);
                         entry = pmd_modify(entry, newprot);
                         ret = HPAGE_PMD_NR;
@@ -1499,7 +1499,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                          * local vs remote hits on the zero page.
                          */
                         if (!is_huge_zero_page(page) &&
-                            !pmd_numa(*pmd)) {
+                            !pmd_protnone(*pmd)) {
                                 pmdp_set_numa(mm, addr, pmd);
                                 ret = HPAGE_PMD_NR;
                         }
@@ -1767,9 +1767,9 @@ static int __split_huge_page_map(struct page *page,
                         pte_t *pte, entry;
                         BUG_ON(PageCompound(page+i));
                         /*
-                         * Note that pmd_numa is not transferred deliberately
-                         * to avoid any possibility that pte_numa leaks to
-                         * a PROT_NONE VMA by accident.
+                         * Note that NUMA hinting access restrictions are not
+                         * transferred to avoid any possibility of altering
+                         * permissions across VMAs.
                          */
                         entry = mk_pte(page + i, vma->vm_page_prot);
                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
diff --git a/mm/memory.c b/mm/memory.c
index bbe6a73a899d..92e6a6299e86 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3124,7 +3124,7 @@ static int handle_pte_fault(struct mm_struct *mm,
                                         pte, pmd, flags, entry);
         }
 
-        if (pte_numa(entry))
+        if (pte_protnone(entry))
                 return do_numa_page(mm, vma, address, entry, pte, pmd);
 
         ptl = pte_lockptr(mm, pmd);
@@ -3202,7 +3202,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         if (pmd_trans_splitting(orig_pmd))
                                 return 0;
 
-                        if (pmd_numa(orig_pmd))
+                        if (pmd_protnone(orig_pmd))
                                 return do_huge_pmd_numa_page(mm, vma, address,
                                                              orig_pmd, pmd);
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 33121662f08b..44ffa698484d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -75,36 +75,18 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                 oldpte = *pte;
                 if (pte_present(oldpte)) {
                         pte_t ptent;
-                        bool updated = false;
 
-                        if (!prot_numa) {
-                                ptent = ptep_modify_prot_start(mm, addr, pte);
-                                if (pte_numa(ptent))
-                                        ptent = pte_mknonnuma(ptent);
-                                ptent = pte_modify(ptent, newprot);
-                                /*
-                                 * Avoid taking write faults for pages we
-                                 * know to be dirty.
-                                 */
-                                if (dirty_accountable && pte_dirty(ptent) &&
-                                    (pte_soft_dirty(ptent) ||
-                                     !(vma->vm_flags & VM_SOFTDIRTY)))
-                                        ptent = pte_mkwrite(ptent);
-                                ptep_modify_prot_commit(mm, addr, pte, ptent);
-                                updated = true;
-                        } else {
-                                struct page *page;
+                        ptent = ptep_modify_prot_start(mm, addr, pte);
+                        ptent = pte_modify(ptent, newprot);
 
-                                page = vm_normal_page(vma, addr, oldpte);
-                                if (page && !PageKsm(page)) {
-                                        if (!pte_numa(oldpte)) {
-                                                ptep_set_numa(mm, addr, pte);
-                                                updated = true;
-                                        }
-                                }
-                        }
-                        if (updated)
-                                pages++;
+                        /* Avoid taking write faults for known dirty pages */
+                        if (dirty_accountable && pte_dirty(ptent) &&
+                            (pte_soft_dirty(ptent) ||
+                             !(vma->vm_flags & VM_SOFTDIRTY))) {
+                                ptent = pte_mkwrite(ptent);
+                        }
+                        ptep_modify_prot_commit(mm, addr, pte, ptent);
+                        pages++;
                 } else if (IS_ENABLED(CONFIG_MIGRATION)) {
                         swp_entry_t entry = pte_to_swp_entry(oldpte);
 
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index dfb79e028ecb..4b8ad760dde3 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -193,7 +193,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
 {
         pmd_t entry = *pmdp;
-        if (pmd_numa(entry))
+        if (pmd_protnone(entry))
                 entry = pmd_mknonnuma(entry);
         set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
         flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);