author     Jan Kara <jack@suse.cz>	2016-12-14 18:07:16 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 19:04:09 -0500
commit     2994302bc8a17180788fac66a47102d338d5d0ec
tree       1f575e8b39fdba1dcf647b26739d874c00d9cf7c /mm/memory.c
parent     fe82221f57ea6840a4238a8e077e3f93f257a03f
mm: add orig_pte field into vm_fault
Add an orig_pte field to the vm_fault structure so that ->page_mkwrite handlers can fully handle the fault. This also saves some passing of extra arguments around.

Link: http://lkml.kernel.org/r/1479460644-25076-8-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
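The companion hunk adding the field itself lives in include/linux/mm.h and is not shown here, since this view is limited to mm/memory.c. A sketch of the structure change as it lands there, with surrounding fields elided and reproduced from the contemporaneous struct vm_fault layout (exact neighbors may differ by kernel version):

struct vm_fault {
        struct vm_area_struct *vma;     /* Target VMA */
        unsigned int flags;             /* FAULT_FLAG_xxx flags */
        ...
        unsigned long address;          /* Faulting virtual address */
        pmd_t *pmd;                     /* Pointer to pmd entry matching
                                         * the 'address' */
        pte_t orig_pte;                 /* Value of PTE at the time of fault */
        ...
};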
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	82
1 file changed, 41 insertions(+), 41 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 7ba9cc58dddd..cf74f7ca911b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2070,8 +2070,8 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
  * case, all we need to do here is to mark the page as writable and update
  * any related book-keeping.
  */
-static inline int wp_page_reuse(struct vm_fault *vmf, pte_t orig_pte,
-                        struct page *page, int page_mkwrite, int dirty_shared)
+static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page,
+                        int page_mkwrite, int dirty_shared)
         __releases(vmf->ptl)
 {
         struct vm_area_struct *vma = vmf->vma;
@@ -2084,8 +2084,8 @@ static inline int wp_page_reuse(struct vm_fault *vmf, pte_t orig_pte,
         if (page)
                 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
 
-        flush_cache_page(vma, vmf->address, pte_pfn(orig_pte));
-        entry = pte_mkyoung(orig_pte);
+        flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
+        entry = pte_mkyoung(vmf->orig_pte);
         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
         if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
                 update_mmu_cache(vma, vmf->address, vmf->pte);
@@ -2135,8 +2135,7 @@ static inline int wp_page_reuse(struct vm_fault *vmf, pte_t orig_pte,
  * held to the old page, as well as updating the rmap.
  * - In any case, unlock the PTL and drop the reference we took to the old page.
  */
-static int wp_page_copy(struct vm_fault *vmf, pte_t orig_pte,
-                struct page *old_page)
+static int wp_page_copy(struct vm_fault *vmf, struct page *old_page)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct mm_struct *mm = vma->vm_mm;
@@ -2150,7 +2149,7 @@ static int wp_page_copy(struct vm_fault *vmf, pte_t orig_pte,
         if (unlikely(anon_vma_prepare(vma)))
                 goto oom;
 
-        if (is_zero_pfn(pte_pfn(orig_pte))) {
+        if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
                 new_page = alloc_zeroed_user_highpage_movable(vma,
                                                               vmf->address);
                 if (!new_page)
@@ -2174,7 +2173,7 @@ static int wp_page_copy(struct vm_fault *vmf, pte_t orig_pte,
          * Re-check the pte - we dropped the lock
          */
         vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
-        if (likely(pte_same(*vmf->pte, orig_pte))) {
+        if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
                 if (old_page) {
                         if (!PageAnon(old_page)) {
                                 dec_mm_counter_fast(mm,
@@ -2184,7 +2183,7 @@ static int wp_page_copy(struct vm_fault *vmf, pte_t orig_pte,
                 } else {
                         inc_mm_counter_fast(mm, MM_ANONPAGES);
                 }
-                flush_cache_page(vma, vmf->address, pte_pfn(orig_pte));
+                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
                 entry = mk_pte(new_page, vma->vm_page_prot);
                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                 /*
@@ -2268,7 +2267,7 @@ oom:
  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
  * mapping
  */
-static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte)
+static int wp_pfn_shared(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
 
@@ -2286,16 +2285,15 @@ static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte)
                  * We might have raced with another page fault while we
                  * released the pte_offset_map_lock.
                  */
-                if (!pte_same(*vmf->pte, orig_pte)) {
+                if (!pte_same(*vmf->pte, vmf->orig_pte)) {
                         pte_unmap_unlock(vmf->pte, vmf->ptl);
                         return 0;
                 }
         }
-        return wp_page_reuse(vmf, orig_pte, NULL, 0, 0);
+        return wp_page_reuse(vmf, NULL, 0, 0);
 }
 
-static int wp_page_shared(struct vm_fault *vmf, pte_t orig_pte,
-                struct page *old_page)
+static int wp_page_shared(struct vm_fault *vmf, struct page *old_page)
         __releases(vmf->ptl)
 {
         struct vm_area_struct *vma = vmf->vma;
@@ -2321,7 +2319,7 @@ static int wp_page_shared(struct vm_fault *vmf, pte_t orig_pte,
                  */
                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                 vmf->address, &vmf->ptl);
-                if (!pte_same(*vmf->pte, orig_pte)) {
+                if (!pte_same(*vmf->pte, vmf->orig_pte)) {
                         unlock_page(old_page);
                         pte_unmap_unlock(vmf->pte, vmf->ptl);
                         put_page(old_page);
@@ -2330,7 +2328,7 @@ static int wp_page_shared(struct vm_fault *vmf, pte_t orig_pte,
                 page_mkwrite = 1;
         }
 
-        return wp_page_reuse(vmf, orig_pte, old_page, page_mkwrite, 1);
+        return wp_page_reuse(vmf, old_page, page_mkwrite, 1);
 }
 
 /*
@@ -2351,13 +2349,13 @@ static int wp_page_shared(struct vm_fault *vmf, pte_t orig_pte,
  * but allow concurrent faults), with pte both mapped and locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte)
+static int do_wp_page(struct vm_fault *vmf)
         __releases(vmf->ptl)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct page *old_page;
 
-        old_page = vm_normal_page(vma, vmf->address, orig_pte);
+        old_page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
         if (!old_page) {
                 /*
                  * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
@@ -2368,10 +2366,10 @@ static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte)
                  */
                 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                      (VM_WRITE|VM_SHARED))
-                        return wp_pfn_shared(vmf, orig_pte);
+                        return wp_pfn_shared(vmf);
 
                 pte_unmap_unlock(vmf->pte, vmf->ptl);
-                return wp_page_copy(vmf, orig_pte, old_page);
+                return wp_page_copy(vmf, old_page);
         }
 
         /*
@@ -2386,7 +2384,7 @@ static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte)
                         lock_page(old_page);
                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                         vmf->address, &vmf->ptl);
-                        if (!pte_same(*vmf->pte, orig_pte)) {
+                        if (!pte_same(*vmf->pte, vmf->orig_pte)) {
                                 unlock_page(old_page);
                                 pte_unmap_unlock(vmf->pte, vmf->ptl);
                                 put_page(old_page);
@@ -2406,12 +2404,12 @@ static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte)
                                 page_move_anon_rmap(old_page, vma);
                         }
                         unlock_page(old_page);
-                        return wp_page_reuse(vmf, orig_pte, old_page, 0, 0);
+                        return wp_page_reuse(vmf, old_page, 0, 0);
                 }
                 unlock_page(old_page);
         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                         (VM_WRITE|VM_SHARED))) {
-                return wp_page_shared(vmf, orig_pte, old_page);
+                return wp_page_shared(vmf, old_page);
         }
 
         /*
@@ -2420,7 +2418,7 @@ static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte)
         get_page(old_page);
 
         pte_unmap_unlock(vmf->pte, vmf->ptl);
-        return wp_page_copy(vmf, orig_pte, old_page);
+        return wp_page_copy(vmf, old_page);
 }
 
 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
@@ -2508,7 +2506,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
  * We return with the mmap_sem locked or unlocked in the same cases
  * as does filemap_fault().
  */
-int do_swap_page(struct vm_fault *vmf, pte_t orig_pte)
+int do_swap_page(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct page *page, *swapcache;
@@ -2519,10 +2517,10 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte)
         int exclusive = 0;
         int ret = 0;
 
-        if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, orig_pte))
+        if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
                 goto out;
 
-        entry = pte_to_swp_entry(orig_pte);
+        entry = pte_to_swp_entry(vmf->orig_pte);
         if (unlikely(non_swap_entry(entry))) {
                 if (is_migration_entry(entry)) {
                         migration_entry_wait(vma->vm_mm, vmf->pmd,
@@ -2530,7 +2528,7 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte)
                 } else if (is_hwpoison_entry(entry)) {
                         ret = VM_FAULT_HWPOISON;
                 } else {
-                        print_bad_pte(vma, vmf->address, orig_pte, NULL);
+                        print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
                         ret = VM_FAULT_SIGBUS;
                 }
                 goto out;
@@ -2547,7 +2545,7 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte)
                          */
                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                         vmf->address, &vmf->ptl);
-                        if (likely(pte_same(*vmf->pte, orig_pte)))
+                        if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
                                 ret = VM_FAULT_OOM;
                         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
                         goto unlock;
@@ -2604,7 +2602,7 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte)
          */
         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                         &vmf->ptl);
-        if (unlikely(!pte_same(*vmf->pte, orig_pte)))
+        if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
                 goto out_nomap;
 
         if (unlikely(!PageUptodate(page))) {
@@ -2632,9 +2630,10 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte)
                 exclusive = RMAP_EXCLUSIVE;
         }
         flush_icache_page(vma, page);
-        if (pte_swp_soft_dirty(orig_pte))
+        if (pte_swp_soft_dirty(vmf->orig_pte))
                 pte = pte_mksoft_dirty(pte);
         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
+        vmf->orig_pte = pte;
         if (page == swapcache) {
                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
                 mem_cgroup_commit_charge(page, memcg, true, false);
@@ -2664,7 +2663,7 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte)
         }
 
         if (vmf->flags & FAULT_FLAG_WRITE) {
-                ret |= do_wp_page(vmf, pte);
+                ret |= do_wp_page(vmf);
                 if (ret & VM_FAULT_ERROR)
                         ret &= VM_FAULT_ERROR;
                 goto out;
@@ -3363,7 +3362,7 @@ static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
         return mpol_misplaced(page, vma, addr);
 }
 
-static int do_numa_page(struct vm_fault *vmf, pte_t pte)
+static int do_numa_page(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct page *page = NULL;
@@ -3371,6 +3370,7 @@ static int do_numa_page(struct vm_fault *vmf, pte_t pte)
         int last_cpupid;
         int target_nid;
         bool migrated = false;
+        pte_t pte = vmf->orig_pte;
         bool was_writable = pte_write(pte);
         int flags = 0;
 
@@ -3521,8 +3521,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
          * So now it's safe to run pte_offset_map().
          */
         vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
-
-        entry = *vmf->pte;
+        vmf->orig_pte = *vmf->pte;
 
         /*
          * some architectures can have larger ptes than wordsize,
@@ -3533,7 +3532,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
          * ptl lock held. So here a barrier will do.
          */
         barrier();
-        if (pte_none(entry)) {
+        if (pte_none(vmf->orig_pte)) {
                 pte_unmap(vmf->pte);
                 vmf->pte = NULL;
         }
@@ -3546,19 +3545,20 @@ static int handle_pte_fault(struct vm_fault *vmf)
                 return do_fault(vmf);
         }
 
-        if (!pte_present(entry))
-                return do_swap_page(vmf, entry);
+        if (!pte_present(vmf->orig_pte))
+                return do_swap_page(vmf);
 
-        if (pte_protnone(entry) && vma_is_accessible(vmf->vma))
-                return do_numa_page(vmf, entry);
+        if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
+                return do_numa_page(vmf);
 
         vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
         spin_lock(vmf->ptl);
+        entry = vmf->orig_pte;
         if (unlikely(!pte_same(*vmf->pte, entry)))
                 goto unlock;
         if (vmf->flags & FAULT_FLAG_WRITE) {
                 if (!pte_write(entry))
-                        return do_wp_page(vmf, entry);
+                        return do_wp_page(vmf);
                 entry = pte_mkdirty(entry);
         }
         entry = pte_mkyoung(entry);
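The pattern the new field enables is the one repeated throughout the hunks above: code that drops the PTE lock later revalidates against vmf->orig_pte after re-taking it, without the original PTE value being threaded through as an argument. A minimal sketch of that idiom, using a hypothetical helper name that is not part of this patch:

/* Hypothetical helper illustrating the vmf->orig_pte revalidation idiom. */
static bool pte_still_valid(struct vm_fault *vmf)
{
        vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
                                       vmf->address, &vmf->ptl);
        if (!pte_same(*vmf->pte, vmf->orig_pte)) {
                /* Raced with another fault; drop the lock and bail out. */
                pte_unmap_unlock(vmf->pte, vmf->ptl);
                return false;
        }
        return true;    /* PTE unchanged; lock still held, caller proceeds. */
}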