author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2014-04-03 17:48:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 19:21:03 -0400
commit		e655fb29074a7aa471bfc9f51a0139c6f636a649 (patch)
tree		8271ea582bbe23fc7a3d97cbaf6a051e4cff25ec /mm/memory.c
parent		7eae74af32d2048d27c38bad1c767a8f3ce4ddb6 (diff)
mm: introduce do_read_fault()
Introduce do_read_fault(). The function does what do_fault() does for
read page faults. Unlike do_fault(), do_read_fault() is pretty clean and
straightforward.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	43
1 file changed, 43 insertions(+), 0 deletions(-)
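Not part of the commit, but for orientation: a minimal userspace sketch of the kind of access this new path services. The first read from a fresh, private, read-only file mapping takes a minor fault with FAULT_FLAG_WRITE clear, which after this patch is handled by do_read_fault(). The file mapped (the demo's own binary) and the single-page size are illustrative choices.

/*
 * read_fault_demo.c - illustrative only, not part of the patch.
 * The first touch of the mapping below is a read fault on a
 * file-backed page; the kernel services it without the write flag.
 * Build: cc -o read_fault_demo read_fault_demo.c
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int fd = open(argv[0], O_RDONLY);	/* map our own binary: always present */

	(void)argc;
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Read-only and private: no write fault is possible on this VMA. */
	char *p = mmap(NULL, pagesz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* First access: minor read fault into the filemap fault path. */
	printf("first byte: 0x%02x\n", (unsigned char)p[0]);

	munmap(p, pagesz);
	close(fd);
	return 0;
}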
diff --git a/mm/memory.c b/mm/memory.c
index af76397c2c54..56784e9a7151 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3317,6 +3317,43 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
+static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmd,
+		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
+{
+	struct page *fault_page;
+	spinlock_t *ptl;
+	pte_t entry, *pte;
+	int ret;
+
+	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+		return ret;
+
+	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (unlikely(!pte_same(*pte, orig_pte))) {
+		pte_unmap_unlock(pte, ptl);
+		unlock_page(fault_page);
+		page_cache_release(fault_page);
+		return ret;
+	}
+
+	flush_icache_page(vma, fault_page);
+	entry = mk_pte(fault_page, vma->vm_page_prot);
+	if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+		entry = pte_mksoft_dirty(entry);
+	inc_mm_counter_fast(mm, MM_FILEPAGES);
+	page_add_file_rmap(fault_page);
+	set_pte_at(mm, address, pte, entry);
+
+	/* no need to invalidate: a not-present page won't be cached */
+	update_mmu_cache(vma, address, pte);
+	pte_unmap_unlock(pte, ptl);
+	unlock_page(fault_page);
+
+	return ret;
+}
+
 /*
  * do_fault() tries to create a new page mapping. It aggressively
  * tries to share with existing pages, but makes a separate copy if
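One detail of the body above worth spelling out: orig_pte was read without the page-table lock, so after pte_offset_map_lock() the handler re-checks pte_same() and backs out if another thread changed the entry in the meantime. A minimal userspace analogue of that lock-and-revalidate shape, with hypothetical names (pthreads; only the pattern mirrors the kernel code):

/*
 * revalidate_demo.c - the "recheck under lock, back out on change"
 * idiom in userspace form. All names are hypothetical.
 * Build: cc -pthread -o revalidate_demo revalidate_demo.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;

/* Move shared_state from 'expected' to 'next' only if unchanged. */
static int update_if_unchanged(int expected, int next)
{
	int done = 0;

	/* Slow work (the __do_fault() analogue) would happen here,
	 * before the lock is taken, so the state may shift under us. */
	pthread_mutex_lock(&lock);
	if (shared_state == expected) {	/* the pte_same() analogue */
		shared_state = next;
		done = 1;
	}
	pthread_mutex_unlock(&lock);	/* unchanged path: back out */
	return done;
}

int main(void)
{
	printf("updated: %d\n", update_if_unchanged(0, 1));	/* 1 */
	printf("stale retry: %d\n", update_if_unchanged(0, 2));	/* 0: state is 1 */
	return 0;
}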
@@ -3510,6 +3547,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3510 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 3547 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
3511 3548
3512 pte_unmap(page_table); 3549 pte_unmap(page_table);
3550 if (!(flags & FAULT_FLAG_WRITE))
3551 return do_read_fault(mm, vma, address, pmd, pgoff, flags,
3552 orig_pte);
3513 return do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 3553 return do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
3514} 3554}
3515 3555
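The pgoff handed to do_read_fault() here is plain arithmetic, visible in the surrounding context: the faulting page's index within the VMA plus the VMA's starting page offset into the file. A self-contained sketch of that calculation, with made-up example values:

/*
 * pgoff_demo.c - the linear-fault file-offset calculation from
 * do_linear_fault(), with illustrative values (4 KiB pages).
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4096-byte pages, as on x86 */

int main(void)
{
	unsigned long vm_start = 0x7f0000000000UL; /* mapping start (example) */
	unsigned long vm_pgoff = 16;               /* mapping starts at file page 16 */
	unsigned long address  = 0x7f0000003000UL; /* faulting address (example) */

	/* Same formula as the kernel: page index into the mapping,
	 * plus the mapping's starting page within the file. */
	unsigned long pgoff = ((address - vm_start) >> PAGE_SHIFT) + vm_pgoff;

	printf("pgoff = %lu (file offset 0x%lx)\n",
	       pgoff, pgoff << PAGE_SHIFT);	/* page 19, offset 0x13000 */
	return 0;
}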
@@ -3542,6 +3582,9 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	pgoff = pte_to_pgoff(orig_pte);
+	if (!(flags & FAULT_FLAG_WRITE))
+		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
+				orig_pte);
 	return do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 