Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index e302ae1dcce0..0897830011f3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -756,7 +756,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long pfn = pte_pfn(pte);
 
 	if (HAVE_PTE_SPECIAL) {
-		if (likely(!pte_special(pte)))
+		if (likely(!pte_special(pte) || pte_numa(pte)))
 			goto check_pfn;
 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
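The new predicate lets a pte that tests pte_special() still be treated as a normal page when it is also a NUMA-hinting pte, which matters once _PAGE_NUMA reuses a software bit that can overlap the special bit. A minimal userspace sketch of that truth table, with plain booleans standing in for the pte helpers (not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the vm_normal_page() branch condition:
 * likely(!pte_special(pte) || pte_numa(pte))
 */
static bool takes_check_pfn_path(bool special, bool numa)
{
	return !special || numa;
}

int main(void)
{
	printf("plain pte        -> %d\n", takes_check_pfn_path(false, false)); /* 1: normal path */
	printf("special pte      -> %d\n", takes_check_pfn_path(true, false));  /* 0: special handling */
	printf("numa-hinting pte -> %d\n", takes_check_pfn_path(true, true));   /* 1: treated as normal */
	return 0;
}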
@@ -782,14 +782,15 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		}
 	}
 
-	if (is_zero_pfn(pfn))
-		return NULL;
 check_pfn:
 	if (unlikely(pfn > highest_memmap_pfn)) {
 		print_bad_pte(vma, addr, pte, NULL);
 		return NULL;
 	}
 
+	if (is_zero_pfn(pfn))
+		return NULL;
+
 	/*
 	 * NOTE! We still have PageReserved() pages in the page tables.
 	 * eg. VDSO mappings can cause them to exist.
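The reordering is the point of this hunk: NUMA ptes now enter at the check_pfn label, so the zero-pfn test only covers them if it sits after the label rather than before it. A compilable reading sketch of the reordered flow; classify() and its arguments are hypothetical stand-ins, not the kernel interfaces:

#include <stdbool.h>
#include <stdio.h>

/* returns 1 for a "normal" page, 0 where vm_normal_page() returns NULL */
static int classify(unsigned long pfn, bool special, bool numa,
		    unsigned long highest_memmap_pfn, unsigned long zero_pfn)
{
	if (!special || numa)
		goto check_pfn;
	return 0;			/* special-mapping handling elided */

check_pfn:
	if (pfn > highest_memmap_pfn)
		return 0;		/* out-of-range pfn: bad pte */
	if (pfn == zero_pfn)
		return 0;		/* the zero page is never "normal" */
	return 1;
}

int main(void)
{
	/* a NUMA-hinting pte on the zero page now correctly yields no page */
	printf("%d\n", classify(5, true, true, 100, 5));   /* 0 */
	printf("%d\n", classify(7, false, false, 100, 5)); /* 1 */
	return 0;
}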
@@ -1722,13 +1723,9 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 
 	/*
-	 * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
-	 * would be called on PROT_NONE ranges. We must never invoke
-	 * handle_mm_fault on PROT_NONE ranges or the NUMA hinting
-	 * page faults would unprotect the PROT_NONE ranges if
-	 * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd
-	 * bitflag. So to avoid that, don't set FOLL_NUMA if
-	 * FOLL_FORCE is set.
+	 * If FOLL_FORCE is set then do not force a full fault as the hinting
+	 * fault information is unrelated to the reference behaviour of a task
+	 * using the address space
	 */
 	if (!(gup_flags & FOLL_FORCE))
 		gup_flags |= FOLL_NUMA;
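The logic below the comment is unchanged: FOLL_NUMA is only added when FOLL_FORCE is clear, since a forced access (a debugger poking another task's memory, for instance) says nothing about that task's own reference pattern. A small sketch of the flag handling; the flag values here are illustrative, not the kernel's definitions:

#include <stdio.h>

#define FOLL_FORCE 0x1	/* illustrative values only */
#define FOLL_NUMA  0x2

/* mirror of the __get_user_pages() gating: request NUMA hinting
 * faults only for ordinary, non-forced references
 */
static unsigned int apply_foll_numa(unsigned int gup_flags)
{
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;
	return gup_flags;
}

int main(void)
{
	printf("%#x\n", apply_foll_numa(0));		/* FOLL_NUMA added */
	printf("%#x\n", apply_foll_numa(FOLL_FORCE));	/* left alone */
	return 0;
}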