author     Shachar Raindel <raindel@mellanox.com>             2015-04-14 18:46:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2015-04-14 19:49:03 -0400
commit     28766805275c12c2298883cece3f98505ac764b4
tree       332f2b844899b5c2f86500bdd837bff047e01eb1
parent     4e047f897771222215ee572e1c0b25e9417376eb
mm: refactor do_wp_page - rewrite the unlock flow
When do_wp_page() is ending, in several cases it needs to unlock the pages and ptls it was accessing. Currently, this cleanup is reached by a goto jump to a shared label, which makes the control flow of the function harder to follow. Readability is further hampered by the unlock path containing a large amount of logic that is needed in only one of the 3 cases. Using goto for cleanup is generally acceptable; however, moving the trivial unlocking flows to the relevant call sites allows deeper refactoring in the next patch.

Signed-off-by: Shachar Raindel <raindel@mellanox.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Haggai Eran <haggaie@mellanox.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Peter Feiner <pfeiner@google.com>
Cc: Michel Lespinasse <walken@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/memory.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
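To make the shape of the change easier to see before reading the diff, here is a minimal, hypothetical C sketch of the before/after pattern the patch describes. The names (struct fault_ctx, still_valid(), drop_locks(), do_real_work()) are invented for illustration and are not the actual do_wp_page() code:

#include <stdbool.h>

/* Stand-ins for the real fault-handling state and helpers (illustrative only). */
struct fault_ctx { bool valid; int locked; };

static bool still_valid(struct fault_ctx *c)  { return c->valid; }
static void drop_locks(struct fault_ctx *c)   { c->locked = 0; }
static int  do_real_work(struct fault_ctx *c) { (void)c; return 1; }

/*
 * Before: every exit path funnels through one "unlock" label via goto,
 * even though most callers only need the trivial unlock.
 */
static int handle_fault_goto(struct fault_ctx *c)
{
	int ret = 0;

	if (!still_valid(c))
		goto unlock;		/* trivial bail-out shares the label */

	ret = do_real_work(c);		/* only this path needs the extra logic */
unlock:
	drop_locks(c);
	return ret;
}

/*
 * After: trivial exits unlock right at the call site and return
 * immediately, leaving the common exit path with a single job.
 */
static int handle_fault_inline(struct fault_ctx *c)
{
	int ret;

	if (!still_valid(c)) {
		drop_locks(c);		/* unlock where we decided to bail out */
		return 0;
	}

	ret = do_real_work(c);
	drop_locks(c);
	return ret;
}

The patch below applies this transformation to do_wp_page()'s two !pte_same() bail-outs and its final unlock: label.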
diff --git a/mm/memory.c b/mm/memory.c
index e70685f3e836..0e28fddafdaf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2066,7 +2066,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page = NULL;
 	pte_t entry;
-	int ret = 0;
+	int page_copied = 0;
 	unsigned long mmun_start = 0;	/* For mmu_notifiers */
 	unsigned long mmun_end = 0;	/* For mmu_notifiers */
 	struct mem_cgroup *memcg;
@@ -2101,7 +2101,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 						 &ptl);
 		if (!pte_same(*page_table, orig_pte)) {
 			unlock_page(old_page);
-			goto unlock;
+			pte_unmap_unlock(page_table, ptl);
+			page_cache_release(old_page);
+			return 0;
 		}
 		page_cache_release(old_page);
 	}
@@ -2148,7 +2150,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 							 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				goto unlock;
+				pte_unmap_unlock(page_table, ptl);
+				page_cache_release(old_page);
+				return 0;
 			}
 			page_mkwrite = 1;
 		}
@@ -2246,29 +2250,28 @@ gotten:
 
 		/* Free the old page.. */
 		new_page = old_page;
-		ret |= VM_FAULT_WRITE;
+		page_copied = 1;
 	} else
 		mem_cgroup_cancel_charge(new_page, memcg);
 
 	if (new_page)
 		page_cache_release(new_page);
-unlock:
+
 	pte_unmap_unlock(page_table, ptl);
-	if (mmun_end > mmun_start)
-		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	if (old_page) {
 		/*
 		 * Don't let another task, with possibly unlocked vma,
 		 * keep the mlocked page.
 		 */
-		if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
 			lock_page(old_page);	/* LRU manipulation */
 			munlock_vma_page(old_page);
 			unlock_page(old_page);
 		}
 		page_cache_release(old_page);
 	}
-	return ret;
+	return page_copied ? VM_FAULT_WRITE : 0;
 oom_free_new:
 	page_cache_release(new_page);
 oom: