author		Shachar Raindel <raindel@mellanox.com>	2015-04-14 18:46:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 19:49:03 -0400
commit		4e047f897771222215ee572e1c0b25e9417376eb
tree		21e462e6030ec8cbd62edd7957005704bf5f2282 /mm/memory.c
parent		5a3fbef325e872f831cf13171c7032915bb1916d
mm: refactor do_wp_page, extract the reuse case
Currently, do_wp_page contains 265 lines of code. It also contains 9 goto
statements, 5 of which target labels that are not cleanup related. This
makes the function extremely difficult to understand.
The following patches are an attempt at breaking the function into its
basic components, making it easier to understand.
The patches are straightforward function extractions from do_wp_page.
As we extract functions, we remove unneeded parameters and simplify the
code as much as possible; the functionality, however, is supposed to
remain completely unchanged. The patches also attempt to document the
functionality of each extracted function. In patch 2, we split the
unlock logic so that each use case contains only the logic relevant to
its specific needs, instead of having a huge number of conditional
decisions in a single unlock flow.
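As a schematic illustration of that unlock split (a hypothetical toy using
pthreads; this is not the actual patch 2 code, and all names here are made
up), compare a flag-driven common exit against per-case unlocking:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for the page-table lock */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for lock_page() */

/* Before: one shared exit path, steered by flags accumulated along the
 * way; every caller must set every flag correctly for this to balance. */
static void unlock_common(int held_page_lock, int held_ptl)
{
	if (held_page_lock)
		pthread_mutex_unlock(&page_lock);
	if (held_ptl)
		pthread_mutex_unlock(&ptl);
}

/* After: each use case releases exactly what it acquired, with no flags. */
static void shared_writable_case(void)
{
	pthread_mutex_lock(&ptl);
	pthread_mutex_lock(&page_lock);
	/* ...dirty accounting for the shared-writable case would go here... */
	pthread_mutex_unlock(&page_lock);
	pthread_mutex_unlock(&ptl);
}

int main(void)
{
	pthread_mutex_lock(&ptl);
	unlock_common(0, 1);		/* flag-driven exit */
	shared_writable_case();		/* specialized exit */
	puts("both unlock styles balanced");
	return 0;
}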
This patch (of 4):
When do_wp_page is ending, in several cases it needs to reuse the existing
page. This is achieved by making the page-table entry writable, and
possibly updating the page-cache state.
Currently, this logic is "called" by using a goto jump, which makes the
function's control flow harder to follow. It also goes against the
coding-style guidelines on the use of goto.
As the code can easily be refactored into a specialized function, refactor
it out and simplify the code flow in do_wp_page.
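To see the shape of the transformation in isolation, here is a minimal,
self-contained C sketch of the same goto-to-helper extraction (toy types
and names only; this is not the kernel code):

#include <stdio.h>

struct page { int dirty; };	/* toy stand-in for the kernel's struct page */

/* Extracted helper: the common "reuse the page" tail that previously
 * lived under a shared "reuse:" label inside the fault handler. */
static int page_reuse(struct page *page, int dirty_shared)
{
	if (dirty_shared)
		page->dirty = 1;	/* book-keeping for shared mappings */
	return 1;			/* analogous to returning VM_FAULT_WRITE */
}

/* After the refactor, every path that previously did "goto reuse"
 * returns through the helper instead, so the function reads top-down. */
static int handle_write_fault(struct page *page, int shared, int last_ref)
{
	if (shared)
		return page_reuse(page, 1);	/* shared writable mapping */
	if (last_ref)
		return page_reuse(page, 0);	/* we hold the only reference */
	return 0;				/* fall through to the copy path */
}

int main(void)
{
	struct page p = { 0 };
	printf("reused=%d dirty=%d\n", handle_write_fault(&p, 1, 0), p.dirty);
	return 0;
}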
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Haggai Eran <haggaie@mellanox.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Peter Feiner <pfeiner@google.com>
Cc: Michel Lespinasse <walken@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	117
1 file changed, 68 insertions(+), 49 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 97839f5c8c30..e70685f3e836 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1983,6 +1983,65 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
 }
 
 /*
+ * Handle write page faults for pages that can be reused in the current vma
+ *
+ * This can happen either due to the mapping being with the VM_SHARED flag,
+ * or due to us being the last reference standing to the page. In either
+ * case, all we need to do here is to mark the page as writable and update
+ * any related book-keeping.
+ */
+static inline int wp_page_reuse(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
+			struct page *page, int page_mkwrite,
+			int dirty_shared)
+	__releases(ptl)
+{
+	pte_t entry;
+	/*
+	 * Clear the pages cpupid information as the existing
+	 * information potentially belongs to a now completely
+	 * unrelated process.
+	 */
+	if (page)
+		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
+
+	flush_cache_page(vma, address, pte_pfn(orig_pte));
+	entry = pte_mkyoung(orig_pte);
+	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	if (ptep_set_access_flags(vma, address, page_table, entry, 1))
+		update_mmu_cache(vma, address, page_table);
+	pte_unmap_unlock(page_table, ptl);
+
+	if (dirty_shared) {
+		struct address_space *mapping;
+		int dirtied;
+
+		if (!page_mkwrite)
+			lock_page(page);
+
+		dirtied = set_page_dirty(page);
+		VM_BUG_ON_PAGE(PageAnon(page), page);
+		mapping = page->mapping;
+		unlock_page(page);
+		page_cache_release(page);
+
+		if ((dirtied || page_mkwrite) && mapping) {
+			/*
+			 * Some device drivers do not set page.mapping
+			 * but still dirty their pages
+			 */
+			balance_dirty_pages_ratelimited(mapping);
+		}
+
+		if (!page_mkwrite)
+			file_update_time(vma->vm_file);
+	}
+
+	return VM_FAULT_WRITE;
+}
+
+/*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
  * and decrementing the shared-page counter for the old page.
@@ -2008,8 +2067,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *old_page, *new_page = NULL;
 	pte_t entry;
 	int ret = 0;
-	int page_mkwrite = 0;
-	bool dirty_shared = false;
 	unsigned long mmun_start = 0;	/* For mmu_notifiers */
 	unsigned long mmun_end = 0;	/* For mmu_notifiers */
 	struct mem_cgroup *memcg;
@@ -2026,7 +2083,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2026 | */ | 2083 | */ |
2027 | if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == | 2084 | if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == |
2028 | (VM_WRITE|VM_SHARED)) | 2085 | (VM_WRITE|VM_SHARED)) |
2029 | goto reuse; | 2086 | return wp_page_reuse(mm, vma, address, page_table, ptl, |
2087 | orig_pte, old_page, 0, 0); | ||
2030 | goto gotten; | 2088 | goto gotten; |
2031 | } | 2089 | } |
2032 | 2090 | ||
@@ -2055,12 +2113,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2055 | */ | 2113 | */ |
2056 | page_move_anon_rmap(old_page, vma, address); | 2114 | page_move_anon_rmap(old_page, vma, address); |
2057 | unlock_page(old_page); | 2115 | unlock_page(old_page); |
2058 | goto reuse; | 2116 | return wp_page_reuse(mm, vma, address, page_table, ptl, |
2117 | orig_pte, old_page, 0, 0); | ||
2059 | } | 2118 | } |
2060 | unlock_page(old_page); | 2119 | unlock_page(old_page); |
2061 | } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == | 2120 | } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == |
2062 | (VM_WRITE|VM_SHARED))) { | 2121 | (VM_WRITE|VM_SHARED))) { |
2122 | int page_mkwrite = 0; | ||
2123 | |||
2063 | page_cache_get(old_page); | 2124 | page_cache_get(old_page); |
2125 | |||
2064 | /* | 2126 | /* |
2065 | * Only catch write-faults on shared writable pages, | 2127 | * Only catch write-faults on shared writable pages, |
2066 | * read-only shared pages can get COWed by | 2128 | * read-only shared pages can get COWed by |
@@ -2091,51 +2153,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2091 | page_mkwrite = 1; | 2153 | page_mkwrite = 1; |
2092 | } | 2154 | } |
2093 | 2155 | ||
2094 | dirty_shared = true; | 2156 | return wp_page_reuse(mm, vma, address, page_table, ptl, |
2095 | 2157 | orig_pte, old_page, page_mkwrite, 1); | |
2096 | reuse: | ||
2097 | /* | ||
2098 | * Clear the pages cpupid information as the existing | ||
2099 | * information potentially belongs to a now completely | ||
2100 | * unrelated process. | ||
2101 | */ | ||
2102 | if (old_page) | ||
2103 | page_cpupid_xchg_last(old_page, (1 << LAST_CPUPID_SHIFT) - 1); | ||
2104 | |||
2105 | flush_cache_page(vma, address, pte_pfn(orig_pte)); | ||
2106 | entry = pte_mkyoung(orig_pte); | ||
2107 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | ||
2108 | if (ptep_set_access_flags(vma, address, page_table, entry,1)) | ||
2109 | update_mmu_cache(vma, address, page_table); | ||
2110 | pte_unmap_unlock(page_table, ptl); | ||
2111 | ret |= VM_FAULT_WRITE; | ||
2112 | |||
2113 | if (dirty_shared) { | ||
2114 | struct address_space *mapping; | ||
2115 | int dirtied; | ||
2116 | |||
2117 | if (!page_mkwrite) | ||
2118 | lock_page(old_page); | ||
2119 | |||
2120 | dirtied = set_page_dirty(old_page); | ||
2121 | VM_BUG_ON_PAGE(PageAnon(old_page), old_page); | ||
2122 | mapping = old_page->mapping; | ||
2123 | unlock_page(old_page); | ||
2124 | page_cache_release(old_page); | ||
2125 | |||
2126 | if ((dirtied || page_mkwrite) && mapping) { | ||
2127 | /* | ||
2128 | * Some device drivers do not set page.mapping | ||
2129 | * but still dirty their pages | ||
2130 | */ | ||
2131 | balance_dirty_pages_ratelimited(mapping); | ||
2132 | } | ||
2133 | |||
2134 | if (!page_mkwrite) | ||
2135 | file_update_time(vma->vm_file); | ||
2136 | } | ||
2137 | |||
2138 | return ret; | ||
2139 | } | 2158 | } |
2140 | 2159 | ||
2141 | /* | 2160 | /* |
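A side note on the extracted function's annotation: wp_page_reuse is marked
__releases(ptl) because it drops the page-table lock that its caller
acquired. Under the sparse static checker such annotations expand to
context-tracking attributes, and to nothing in a normal build. A simplified
sketch of the kernel's pattern follows (the real definitions live in
include/linux/compiler.h):

#ifdef __CHECKER__			/* defined when running sparse */
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

This lets sparse warn when a function exits with a lock balance that
contradicts its annotation.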