author	Dave Hansen <dave.hansen@linux.intel.com>	2016-07-07 20:19:15 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-07-13 03:43:25 -0400
commit	dcb32d9913b7ed527b135a7e221f8d14b67bb952 (patch)
tree	8a6f1a2fc60613780291a1a11b5bd62c5a238b77
parent	e4a84be6f05eab4778732d799f63b3cd15427885 (diff)
x86/mm: Use pte_none() to test for empty PTE
The page table manipulation code seems to have grown a couple of sites
that are looking for empty PTEs.  Just in case one of these entries got
a stray bit set, use pte_none() instead of checking for a zero
pte_val().

The use of pte_same() makes me a bit nervous.  If we were doing a
pte_same() check against two cleared entries and one of them had a
stray bit set, it might fail the pte_same() check.  But, I don't think
we ever _do_ pte_same() for cleared entries.  It is almost entirely
used for checking for races in fault-in paths.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luis R. Rodriguez <mcgrof@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: dave.hansen@intel.com
Cc: linux-mm@kvack.org
Cc: mhocko@suse.com
Link: http://lkml.kernel.org/r/20160708001915.813703D9@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
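Below is a minimal, stand-alone C sketch (user-space, C99) of the distinction the patch leans on. The pte_t layout, the STRAY_BITS_MASK value, and the pte_none() body here are hypothetical illustrations, not the kernel's real x86 definitions; the point is only that an open-coded pte_val(x) == 0 test hard-wires "every bit must be zero", while a dedicated pte_none() helper gives the architecture a single place to decide which bits actually count toward "this entry is empty".

/*
 * Illustrative sketch only -- the structure layout, the stray-bit mask and
 * the pte_none() body are hypothetical, not the kernel's x86 definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pte; } pte_t;

#define pte_val(x)	((x).pte)

/* Hypothetical: a bit that carries no meaning for an otherwise-empty entry. */
#define STRAY_BITS_MASK	0x8000000000000000ULL

/* One central predicate for "is this entry empty?". */
static bool pte_none(pte_t pte)
{
	return (pte_val(pte) & ~STRAY_BITS_MASK) == 0;
}

int main(void)
{
	pte_t cleared = { .pte = 0 };
	pte_t stray   = { .pte = STRAY_BITS_MASK };	/* empty, but one stray bit set */

	/* Open-coded zero test: the stray entry looks populated. */
	printf("pte_val()==0 : cleared=%d stray=%d\n",
	       pte_val(cleared) == 0, pte_val(stray) == 0);

	/* Central helper: both entries are still recognized as empty. */
	printf("pte_none()   : cleared=%d stray=%d\n",
	       pte_none(cleared), pte_none(stray));
	return 0;
}

With such a helper at every call site, a stray bit in an otherwise-cleared entry no longer flips these checks from "empty, keep going" to "populated, bail out".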
-rw-r--r--	arch/x86/mm/init_64.c	12
-rw-r--r--	arch/x86/mm/pageattr.c	2
-rw-r--r--	arch/x86/mm/pgtable_32.c	2
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index bce2e5d9edd4..bb88fbc0a288 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -354,7 +354,7 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 	 * pagetable pages as RO. So assume someone who pre-setup
 	 * these mappings are more intelligent.
 	 */
-	if (pte_val(*pte)) {
+	if (!pte_none(*pte)) {
 		if (!after_bootmem)
 			pages++;
 		continue;
@@ -396,7 +396,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			continue;
 		}
 
-		if (pmd_val(*pmd)) {
+		if (!pmd_none(*pmd)) {
 			if (!pmd_large(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
 				pte = (pte_t *)pmd_page_vaddr(*pmd);
@@ -470,7 +470,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			continue;
 		}
 
-		if (pud_val(*pud)) {
+		if (!pud_none(*pud)) {
 			if (!pud_large(*pud)) {
 				pmd = pmd_offset(pud, 0);
 				last_map_addr = phys_pmd_init(pmd, addr, end,
@@ -673,7 +673,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		pte = pte_start + i;
-		if (pte_val(*pte))
+		if (!pte_none(*pte))
 			return;
 	}
 
@@ -691,7 +691,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 
 	for (i = 0; i < PTRS_PER_PMD; i++) {
 		pmd = pmd_start + i;
-		if (pmd_val(*pmd))
+		if (!pmd_none(*pmd))
 			return;
 	}
 
@@ -710,7 +710,7 @@ static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
 
 	for (i = 0; i < PTRS_PER_PUD; i++) {
 		pud = pud_start + i;
-		if (pud_val(*pud))
+		if (!pud_none(*pud))
 			return false;
 	}
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7a1f7bbf4105..75142159b0a5 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1185,7 +1185,7 @@ repeat:
 		return __cpa_process_fault(cpa, address, primary);
 
 	old_pte = *kpte;
-	if (!pte_val(old_pte))
+	if (pte_none(old_pte))
 		return __cpa_process_fault(cpa, address, primary);
 
 	if (level == PG_LEVEL_4K) {
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 75cc0978d45d..e67ae0e6c59d 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -47,7 +47,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 		return;
 	}
 	pte = pte_offset_kernel(pmd, vaddr);
-	if (pte_val(pteval))
+	if (!pte_none(pteval))
 		set_pte_at(&init_mm, vaddr, pte, pteval);
 	else
 		pte_clear(&init_mm, vaddr, pte);