author    Jan Kara <jack@suse.cz>  2016-12-14 18:07:27 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-12-14 19:04:09 -0500
commit    97ba0c2b4b0994044e404b7a96fc92a2e0424534
tree      295c28bd72525781e3fe601d5585d7db9253c837  /mm/memory.c
parent    b1aa812b21084285e9f6098639be9cd5bf9e05d7
mm: factor out common parts of write fault handling
Currently we duplicate handling of shared write faults in wp_page_reuse()
and do_shared_fault().  Factor them out into a common function.

Link: http://lkml.kernel.org/r/1479460644-25076-12-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 78
1 file changed, 37 insertions(+), 41 deletions(-)
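In outline, the patch moves the set_page_dirty() / balance_dirty_pages_ratelimited() / file_update_time() sequence into one helper and reduces both call sites to a single call. A condensed view, paraphrased from the diff below (locking details and error handling elided):

	/* New helper: expects a locked page; dirties it, unlocks it,
	 * throttles writeback, and updates the file time. */
	static void fault_dirty_shared_page(struct vm_area_struct *vma,
					    struct page *page);

	/* wp_page_reuse(): the page may not be locked yet, so take the
	 * lock first unless ->page_mkwrite already returned it locked. */
	if (dirty_shared) {
		if (!page_mkwrite)
			lock_page(page);
		fault_dirty_shared_page(vma, page);
		put_page(page);
	}

	/* do_shared_fault(): the page is already locked at this point,
	 * so the helper is called directly. */
	fault_dirty_shared_page(vma, vmf->page);

The helper thus encodes a simple contract (caller locks, helper unlocks), which is what lets both paths share the truncate-race handling and the dirty throttling.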
diff --git a/mm/memory.c b/mm/memory.c
index ca3b95fa5fd1..6fd827804bf5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2063,6 +2063,41 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
 }
 
 /*
+ * Handle dirtying of a page in shared file mapping on a write fault.
+ *
+ * The function expects the page to be locked and unlocks it.
+ */
+static void fault_dirty_shared_page(struct vm_area_struct *vma,
+				    struct page *page)
+{
+	struct address_space *mapping;
+	bool dirtied;
+	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
+
+	dirtied = set_page_dirty(page);
+	VM_BUG_ON_PAGE(PageAnon(page), page);
+	/*
+	 * Take a local copy of the address_space - page.mapping may be zeroed
+	 * by truncate after unlock_page().  The address_space itself remains
+	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
+	 * release semantics to prevent the compiler from undoing this copying.
+	 */
+	mapping = page_rmapping(page);
+	unlock_page(page);
+
+	if ((dirtied || page_mkwrite) && mapping) {
+		/*
+		 * Some device drivers do not set page.mapping
+		 * but still dirty their pages
+		 */
+		balance_dirty_pages_ratelimited(mapping);
+	}
+
+	if (!page_mkwrite)
+		file_update_time(vma->vm_file);
+}
+
+/*
  * Handle write page faults for pages that can be reused in the current vma
  *
  * This can happen either due to the mapping being with the VM_SHARED flag,
@@ -2092,28 +2127,11 @@ static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page,
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 
 	if (dirty_shared) {
-		struct address_space *mapping;
-		int dirtied;
-
 		if (!page_mkwrite)
 			lock_page(page);
 
-		dirtied = set_page_dirty(page);
-		VM_BUG_ON_PAGE(PageAnon(page), page);
-		mapping = page->mapping;
-		unlock_page(page);
+		fault_dirty_shared_page(vma, page);
 		put_page(page);
-
-		if ((dirtied || page_mkwrite) && mapping) {
-			/*
-			 * Some device drivers do not set page.mapping
-			 * but still dirty their pages
-			 */
-			balance_dirty_pages_ratelimited(mapping);
-		}
-
-		if (!page_mkwrite)
-			file_update_time(vma->vm_file);
 	}
 
 	return VM_FAULT_WRITE;
@@ -3294,8 +3312,6 @@ uncharge_out:
 static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct address_space *mapping;
-	int dirtied = 0;
 	int ret, tmp;
 
 	ret = __do_fault(vmf);
@@ -3324,27 +3340,7 @@ static int do_shared_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	if (set_page_dirty(vmf->page))
-		dirtied = 1;
-	/*
-	 * Take a local copy of the address_space - page.mapping may be zeroed
-	 * by truncate after unlock_page().  The address_space itself remains
-	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
-	 * release semantics to prevent the compiler from undoing this copying.
-	 */
-	mapping = page_rmapping(vmf->page);
-	unlock_page(vmf->page);
-	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
-		/*
-		 * Some device drivers do not set page.mapping but still
-		 * dirty their pages
-		 */
-		balance_dirty_pages_ratelimited(mapping);
-	}
-
-	if (!vma->vm_ops->page_mkwrite)
-		file_update_time(vma->vm_file);
-
+	fault_dirty_shared_page(vma, vmf->page);
 	return ret;
 }
 