Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 45 insertions(+), 11 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ac20b2a6a0c3..22e037e3364e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -690,12 +690,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 	/*
 	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
 	 */
-	if (vma->vm_ops)
-		printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
-		       vma->vm_ops->fault);
-	if (vma->vm_file)
-		printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
-		       vma->vm_file->f_op->mmap);
+	pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
+		 vma->vm_file,
+		 vma->vm_ops ? vma->vm_ops->fault : NULL,
+		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
+		 mapping ? mapping->a_ops->readpage : NULL);
 	dump_stack();
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
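
Note on the hunk above: %pD prints the name of a struct file and %pf resolves a function pointer to its symbol name, so the NULL-guarded ternaries keep print_bad_pte() from dereferencing a missing vm_ops, f_op or mapping (vsprintf prints "(null)" for a NULL pointer). As a hypothetical illustration, not output from this commit, a page-cache backed mapping might now report a single line roughly like:

	file:app.log fault:filemap_fault mmap:generic_file_mmap readpage:ext2_readpage
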
@@ -2181,6 +2180,42 @@ oom:
 	return VM_FAULT_OOM;
 }
 
+/*
+ * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
+ * mapping
+ */
+static int wp_pfn_shared(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
+			pmd_t *pmd)
+{
+	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
+		struct vm_fault vmf = {
+			.page = NULL,
+			.pgoff = linear_page_index(vma, address),
+			.virtual_address = (void __user *)(address & PAGE_MASK),
+			.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
+		};
+		int ret;
+
+		pte_unmap_unlock(page_table, ptl);
+		ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
+		if (ret & VM_FAULT_ERROR)
+			return ret;
+		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+		/*
+		 * We might have raced with another page fault while we
+		 * released the pte_offset_map_lock.
+		 */
+		if (!pte_same(*page_table, orig_pte)) {
+			pte_unmap_unlock(page_table, ptl);
+			return 0;
+		}
+	}
+	return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte,
+			     NULL, 0, 0);
+}
+
 static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 			  unsigned long address, pte_t *page_table,
 			  pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte,
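
For orientation, a minimal consumer sketch of the callback wp_pfn_shared() invokes; the mydrv_* names are hypothetical and not part of this commit:

	/* Hypothetical driver hook: called with the pte lock dropped, before
	 * the pfn is mapped writable. Returning 0 lets the fault proceed to
	 * wp_page_reuse(); a VM_FAULT_ERROR bit aborts the write fault. */
	static int mydrv_pfn_mkwrite(struct vm_area_struct *vma,
				     struct vm_fault *vmf)
	{
		/* e.g. begin dirty tracking for the page at vmf->pgoff */
		return 0;
	}

	static const struct vm_operations_struct mydrv_vm_ops = {
		.fault		= mydrv_fault,	/* hypothetical fault handler */
		.pfn_mkwrite	= mydrv_pfn_mkwrite,
	};
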
@@ -2259,13 +2294,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * VM_PFNMAP VMA.
 	 *
 	 * We should not cow pages in a shared writeable mapping.
-	 * Just mark the pages writable as we can't do any dirty
-	 * accounting on raw pfn maps.
+	 * Just mark the pages writable and/or call ops->pfn_mkwrite.
 	 */
 	if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 			     (VM_WRITE|VM_SHARED))
-		return wp_page_reuse(mm, vma, address, page_table, ptl,
-				     orig_pte, old_page, 0, 0);
+		return wp_pfn_shared(mm, vma, address, page_table, ptl,
+				     orig_pte, pmd);
 
 	pte_unmap_unlock(page_table, ptl);
 	return wp_page_copy(mm, vma, address, page_table, pmd,
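
As a reminder of which vmas take the new branch (an assumption about typical callers, not shown in this diff): a shared writable VM_PFNMAP mapping is normally created by a driver's mmap method via remap_pfn_range(), roughly:

	/* Hypothetical mmap method yielding a VM_SHARED|VM_PFNMAP vma;
	 * mydrv_base_pfn stands in for the device's backing pfn. */
	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_ops = &mydrv_vm_ops;	/* picks up .pfn_mkwrite above */
		return remap_pfn_range(vma, vma->vm_start, mydrv_base_pfn,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}
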
@@ -2845,7 +2879,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
 	struct vm_fault vmf;
 	int off;
 
-	nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT;
+	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
 
 	start_addr = max(address & mask, vma->vm_start);
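
On the last hunk: READ_ONCE() supersedes ACCESS_ONCE() because the volatile cast behind ACCESS_ONCE() is only reliable for scalar types, while READ_ONCE() also handles aggregate ones; the load emitted here is unchanged. A writer updating the knob elsewhere would pair it with the matching one-shot store, sketched under assumption:

	WRITE_ONCE(fault_around_bytes, val);	/* hypothetical writer side */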