about summary refs log tree commit diff stats
path: root/mm/memory.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	| 53
1 file changed, 44 insertions(+), 9 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index fc38d4ed9ad..c8b5b9435a9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -108,6 +108,17 @@ static int __init disable_randmaps(char *s)
 }
 __setup("norandmaps", disable_randmaps);
 
+static unsigned long zero_pfn __read_mostly;
+
+/*
+ * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
+ */
+static int __init init_zero_pfn(void)
+{
+	zero_pfn = page_to_pfn(ZERO_PAGE(0));
+	return 0;
+}
+core_initcall(init_zero_pfn);
 
 /*
  * If a p?d_bad entry is found while walking page tables, report
@@ -499,7 +510,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 	if (HAVE_PTE_SPECIAL) {
 		if (likely(!pte_special(pte)))
 			goto check_pfn;
-		if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+			return NULL;
+		if (pfn != zero_pfn)
 			print_bad_pte(vma, addr, pte, NULL);
 		return NULL;
 	}
@@ -1144,9 +1157,14 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 		goto no_page;
 	if ((flags & FOLL_WRITE) && !pte_write(pte))
 		goto unlock;
+
 	page = vm_normal_page(vma, address, pte);
-	if (unlikely(!page))
-		goto bad_page;
+	if (unlikely(!page)) {
+		if ((flags & FOLL_DUMP) ||
+		    pte_pfn(pte) != zero_pfn)
+			goto bad_page;
+		page = pte_page(pte);
+	}
 
 	if (flags & FOLL_GET)
 		get_page(page);
@@ -2084,10 +2102,19 @@ gotten:
 
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
-	VM_BUG_ON(old_page == ZERO_PAGE(0));
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-	if (!new_page)
-		goto oom;
+
+	if (pte_pfn(orig_pte) == zero_pfn) {
+		new_page = alloc_zeroed_user_highpage_movable(vma, address);
+		if (!new_page)
+			goto oom;
+	} else {
+		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+		if (!new_page)
+			goto oom;
+		cow_user_page(new_page, old_page, address, vma);
+	}
+	__SetPageUptodate(new_page);
+
 	/*
 	 * Don't let another task, with possibly unlocked vma,
 	 * keep the mlocked page.
@@ -2097,8 +2124,6 @@ gotten:
 		clear_page_mlock(old_page);
 		unlock_page(old_page);
 	}
-	cow_user_page(new_page, old_page, address, vma);
-	__SetPageUptodate(new_page);
 
 	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
@@ -2639,6 +2664,15 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
+	if (HAVE_PTE_SPECIAL && !(flags & FAULT_FLAG_WRITE)) {
+		entry = pte_mkspecial(pfn_pte(zero_pfn, vma->vm_page_prot));
+		ptl = pte_lockptr(mm, pmd);
+		spin_lock(ptl);
+		if (!pte_none(*page_table))
+			goto unlock;
+		goto setpte;
+	}
+
 	/* Allocate our own private page. */
 	pte_unmap(page_table);
 
@@ -2662,6 +2696,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	inc_mm_counter(mm, anon_rss);
 	page_add_new_anon_rmap(page, vma, address);
+setpte:
 	set_pte_at(mm, address, page_table, entry);
 
 	/* No need to invalidate - it was non-present before */