author	Johannes Weiner <hannes@cmpxchg.org>	2015-02-10 17:11:30 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-10 17:30:34 -0500
commit	f38b4b310d402055702c63b0989dbcd16adf9537 (patch)
tree	bb931dc1e273ff0027243c596b795a0f504a59d7 /mm/memory.c
parent	74ec67511d36f9c731065b1dae7d9638a3b639d3 (diff)
mm: memory: merge shared-writable dirtying branches in do_wp_page()
Whether there is a vm_ops->page_mkwrite or not, the page dirtying is
pretty much the same. Make sure the page references are the same in both
cases, then merge the two branches.

It's tempting to go even further and page-lock the !page_mkwrite case, to
get it in line with everybody else setting the page table and thus further
simplify the model. But that's not quite compelling enough to justify
dropping the pte lock, then relocking and verifying the entry for
filesystems without ->page_mkwrite, which notably includes tmpfs. Leave
it for now and lock the page late in the !page_mkwrite case.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
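To make the merged control flow easier to follow, here is a minimal
userspace sketch of the single dirty_shared branch this patch creates.
It is NOT the kernel code: every helper below is a stand-in stub for the
real lock_page()/set_page_dirty()/balance_dirty_pages_ratelimited()/
file_update_time(), the function name dirty_shared_page() is made up for
the sketch, and the page refcounting and mapping checks are reduced to
the bare shape of the logic shown in the diff further down.

/*
 * Minimal userspace sketch of the merged shared-writable dirtying
 * branch -- NOT the kernel code. All helpers are stand-in stubs.
 */
#include <stdbool.h>
#include <stdio.h>

struct page { bool dirty; };

static void lock_page(struct page *p)   { (void)p; puts("lock page"); }
static void unlock_page(struct page *p) { (void)p; puts("unlock page"); }

/* Returns true if the page went from clean to dirty on this call. */
static bool set_page_dirty(struct page *p)
{
	bool newly_dirtied = !p->dirty;

	p->dirty = true;
	return newly_dirtied;
}

static void balance_dirty_pages(void) { puts("throttle heavy dirtiers"); }
static void file_update_time(void)    { puts("update file times"); }

/*
 * One branch for both cases. In the page_mkwrite case the handler has
 * already locked the page and is expected to handle file times, so
 * those two steps happen here only for !page_mkwrite -- the "lock the
 * page late" approach the commit message describes.
 */
static void dirty_shared_page(struct page *old_page, bool page_mkwrite)
{
	bool dirtied;

	if (!page_mkwrite)
		lock_page(old_page);

	dirtied = set_page_dirty(old_page);
	unlock_page(old_page);

	if (dirtied || page_mkwrite)
		balance_dirty_pages();

	if (!page_mkwrite)
		file_update_time();
}

int main(void)
{
	struct page p = { .dirty = false };

	dirty_shared_page(&p, false);	/* filesystem without ->page_mkwrite */
	return 0;
}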
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	48
1 file changed, 17 insertions(+), 31 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 0e9b32610655..988d3099a25d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2005,7 +2005,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t entry;
 	int ret = 0;
 	int page_mkwrite = 0;
-	struct page *dirty_page = NULL;
+	bool dirty_shared = false;
 	unsigned long mmun_start = 0;	/* For mmu_notifiers */
 	unsigned long mmun_end = 0;	/* For mmu_notifiers */
 	struct mem_cgroup *memcg;
@@ -2056,6 +2056,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unlock_page(old_page);
 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
+		page_cache_get(old_page);
 		/*
 		 * Only catch write-faults on shared writable pages,
 		 * read-only shared pages can get COWed by
@@ -2063,7 +2064,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 			int tmp;
-			page_cache_get(old_page);
+
 			pte_unmap_unlock(page_table, ptl);
 			tmp = do_page_mkwrite(vma, old_page, address);
 			if (unlikely(!tmp || (tmp &
@@ -2083,11 +2084,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				unlock_page(old_page);
 				goto unlock;
 			}
-
 			page_mkwrite = 1;
 		}
-		dirty_page = old_page;
-		get_page(dirty_page);
+
+		dirty_shared = true;
 
 reuse:
 	/*
@@ -2106,43 +2106,29 @@ reuse:
 	pte_unmap_unlock(page_table, ptl);
 	ret |= VM_FAULT_WRITE;
 
-	if (!dirty_page)
-		return ret;
-
-	if (!page_mkwrite) {
+	if (dirty_shared) {
 		struct address_space *mapping;
 		int dirtied;
 
-		lock_page(dirty_page);
-		dirtied = set_page_dirty(dirty_page);
-		VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
-		mapping = dirty_page->mapping;
-		unlock_page(dirty_page);
+		if (!page_mkwrite)
+			lock_page(old_page);
 
-		if (dirtied && mapping) {
-			/*
-			 * Some device drivers do not set page.mapping
-			 * but still dirty their pages
-			 */
-			balance_dirty_pages_ratelimited(mapping);
-		}
+		dirtied = set_page_dirty(old_page);
+		VM_BUG_ON_PAGE(PageAnon(old_page), old_page);
+		mapping = old_page->mapping;
+		unlock_page(old_page);
+		page_cache_release(old_page);
 
-		file_update_time(vma->vm_file);
-	}
-	put_page(dirty_page);
-	if (page_mkwrite) {
-		struct address_space *mapping = dirty_page->mapping;
-
-		set_page_dirty(dirty_page);
-		unlock_page(dirty_page);
-		page_cache_release(dirty_page);
-		if (mapping) {
+		if ((dirtied || page_mkwrite) && mapping) {
 			/*
 			 * Some device drivers do not set page.mapping
 			 * but still dirty their pages
 			 */
 			balance_dirty_pages_ratelimited(mapping);
 		}
+
+		if (!page_mkwrite)
+			file_update_time(vma->vm_file);
 	}
 
 	return ret;