summaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2016-12-14 18:07:39 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-14 19:04:09 -0500
commit66a6197c118540d454913eef24d68d7491ab5d5f (patch)
tree085d09ca04b45caa37228d5561a8fb1729be16cb /mm/memory.c
parent997dd98dd68beb2aea74cac53e7fd440cc8dba68 (diff)
mm: provide helper for finishing mkwrite faults
Provide a helper function for finishing write faults due to PTE being read-only. The helper will be used by DAX to avoid the need of complicating generic MM code with DAX locking specifics.

Link: http://lkml.kernel.org/r/1479460644-25076-16-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c67
1 file changed, 40 insertions(+), 27 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 82e7689e3059..bbc25da48a18 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2269,6 +2269,38 @@ oom:
2269 return VM_FAULT_OOM; 2269 return VM_FAULT_OOM;
2270} 2270}
2271 2271
2272/**
2273 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
2274 * writeable once the page is prepared
2275 *
2276 * @vmf: structure describing the fault
2277 *
2278 * This function handles all that is needed to finish a write page fault in a
2279 * shared mapping due to PTE being read-only once the mapped page is prepared.
2280 * It handles locking of PTE and modifying it. The function returns
2281 * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE
2282 * lock.
2283 *
2284 * The function expects the page to be locked or other protection against
2285 * concurrent faults / writeback (such as DAX radix tree locks).
2286 */
2287int finish_mkwrite_fault(struct vm_fault *vmf)
2288{
2289 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
2290 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
2291 &vmf->ptl);
2292 /*
2293 * We might have raced with another page fault while we released the
2294 * pte_offset_map_lock.
2295 */
2296 if (!pte_same(*vmf->pte, vmf->orig_pte)) {
2297 pte_unmap_unlock(vmf->pte, vmf->ptl);
2298 return 0;
2299 }
2300 wp_page_reuse(vmf);
2301 return VM_FAULT_WRITE;
2302}
2303
2272/* 2304/*
2273 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED 2305 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
2274 * mapping 2306 * mapping
@@ -2285,16 +2317,7 @@ static int wp_pfn_shared(struct vm_fault *vmf)
2285 ret = vma->vm_ops->pfn_mkwrite(vma, vmf); 2317 ret = vma->vm_ops->pfn_mkwrite(vma, vmf);
2286 if (ret & VM_FAULT_ERROR) 2318 if (ret & VM_FAULT_ERROR)
2287 return ret; 2319 return ret;
2288 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 2320 return finish_mkwrite_fault(vmf);
2289 vmf->address, &vmf->ptl);
2290 /*
2291 * We might have raced with another page fault while we
2292 * released the pte_offset_map_lock.
2293 */
2294 if (!pte_same(*vmf->pte, vmf->orig_pte)) {
2295 pte_unmap_unlock(vmf->pte, vmf->ptl);
2296 return 0;
2297 }
2298 } 2321 }
2299 wp_page_reuse(vmf); 2322 wp_page_reuse(vmf);
2300 return VM_FAULT_WRITE; 2323 return VM_FAULT_WRITE;
@@ -2304,7 +2327,6 @@ static int wp_page_shared(struct vm_fault *vmf)
2304 __releases(vmf->ptl) 2327 __releases(vmf->ptl)
2305{ 2328{
2306 struct vm_area_struct *vma = vmf->vma; 2329 struct vm_area_struct *vma = vmf->vma;
2307 int page_mkwrite = 0;
2308 2330
2309 get_page(vmf->page); 2331 get_page(vmf->page);
2310 2332
@@ -2318,26 +2340,17 @@ static int wp_page_shared(struct vm_fault *vmf)
2318 put_page(vmf->page); 2340 put_page(vmf->page);
2319 return tmp; 2341 return tmp;
2320 } 2342 }
2321 /* 2343 tmp = finish_mkwrite_fault(vmf);
2322 * Since we dropped the lock we need to revalidate 2344 if (unlikely(!tmp || (tmp &
2323 * the PTE as someone else may have changed it. If 2345 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
2324 * they did, we just return, as we can count on the
2325 * MMU to tell us if they didn't also make it writable.
2326 */
2327 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
2328 vmf->address, &vmf->ptl);
2329 if (!pte_same(*vmf->pte, vmf->orig_pte)) {
2330 unlock_page(vmf->page); 2346 unlock_page(vmf->page);
2331 pte_unmap_unlock(vmf->pte, vmf->ptl);
2332 put_page(vmf->page); 2347 put_page(vmf->page);
2333 return 0; 2348 return tmp;
2334 } 2349 }
2335 page_mkwrite = 1; 2350 } else {
2336 } 2351 wp_page_reuse(vmf);
2337
2338 wp_page_reuse(vmf);
2339 if (!page_mkwrite)
2340 lock_page(vmf->page); 2352 lock_page(vmf->page);
2353 }
2341 fault_dirty_shared_page(vma, vmf->page); 2354 fault_dirty_shared_page(vma, vmf->page);
2342 put_page(vmf->page); 2355 put_page(vmf->page);
2343 2356