author	Hugh Dickins <hughd@google.com>	2016-05-19 20:12:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-05-19 22:12:14 -0400
commit	1d069b7dd56728a0eb6acb138dce0d37600dee00 (patch)
tree	29a891a8640bbf0356037d39a115e6ca7a995a75
parent	bf8616d5fa179d6c755f06726567c6d63c6fbbc7 (diff)
huge pagecache: extend mremap pmd rmap lockout to files
Whatever huge pagecache implementation we go with, file rmap locking
must be added to anon rmap locking, when mremap's move_page_tables()
finds a pmd_trans_huge pmd entry: a simple change, let's do it now.

Factor out take_rmap_locks() and drop_rmap_locks() to handle the locking
for move_ptes() and move_page_tables(), and delete the VM_BUG_ON_VMA
which rejected vm_file and required anon_vma.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Yang Shi <yang.shi@linaro.org>
Cc: Ning Qu <quning@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/mremap.c	42
1 file changed, 22 insertions(+), 20 deletions(-)
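For readers skimming the diff below: the patch replaces open-coded rmap locking with two small helpers that always take the file rmap lock before the anon rmap lock, and release them in reverse order. As a minimal, self-contained sketch of that ordering discipline, here is a user-space analogy using pthread rwlocks as stand-ins for the kernel's i_mmap_rwsem and anon_vma root lock; the struct and field names are illustrative only, not kernel API:

#include <pthread.h>
#include <stddef.h>

/* Stand-ins for the kernel's i_mmap_rwsem and anon_vma root lock. */
struct demo_vma {
	pthread_rwlock_t *file_rmap;	/* NULL if the mapping is not file-backed */
	pthread_rwlock_t *anon_rmap;	/* NULL if there are no anon pages */
};

/* Take whichever rmap locks exist, file rmap lock first. */
static void take_rmap_locks(struct demo_vma *vma)
{
	if (vma->file_rmap)
		pthread_rwlock_wrlock(vma->file_rmap);
	if (vma->anon_rmap)
		pthread_rwlock_wrlock(vma->anon_rmap);
}

/* Release in the reverse order of acquisition. */
static void drop_rmap_locks(struct demo_vma *vma)
{
	if (vma->anon_rmap)
		pthread_rwlock_unlock(vma->anon_rmap);
	if (vma->file_rmap)
		pthread_rwlock_unlock(vma->file_rmap);
}

In the patch itself, both move_ptes() and the huge-pmd path in move_page_tables() bracket their work with these helpers whenever need_rmap_locks is set, so a VMA that is both file-backed and anonymous sees one consistent lock order on every path.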
diff --git a/mm/mremap.c b/mm/mremap.c
index 7d98fe1adc12..9dc499977924 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -70,6 +70,22 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 	return pmd;
 }
 
+static void take_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+	if (vma->anon_vma)
+		anon_vma_lock_write(vma->anon_vma);
+}
+
+static void drop_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->anon_vma)
+		anon_vma_unlock_write(vma->anon_vma);
+	if (vma->vm_file)
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+}
+
 static pte_t move_soft_dirty_pte(pte_t pte)
 {
 	/*
@@ -90,8 +106,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 		unsigned long new_addr, bool need_rmap_locks)
 {
-	struct address_space *mapping = NULL;
-	struct anon_vma *anon_vma = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
@@ -114,16 +128,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	 * serialize access to individual ptes, but only rmap traversal
 	 * order guarantees that we won't miss both the old and new ptes).
 	 */
-	if (need_rmap_locks) {
-		if (vma->vm_file) {
-			mapping = vma->vm_file->f_mapping;
-			i_mmap_lock_write(mapping);
-		}
-		if (vma->anon_vma) {
-			anon_vma = vma->anon_vma;
-			anon_vma_lock_write(anon_vma);
-		}
-	}
+	if (need_rmap_locks)
+		take_rmap_locks(vma);
 
 	/*
 	 * We don't have to worry about the ordering of src and dst
@@ -151,10 +157,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
-	if (anon_vma)
-		anon_vma_unlock_write(anon_vma);
-	if (mapping)
-		i_mmap_unlock_write(mapping);
+	if (need_rmap_locks)
+		drop_rmap_locks(vma);
 }
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
@@ -193,15 +197,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		if (pmd_trans_huge(*old_pmd)) {
 			if (extent == HPAGE_PMD_SIZE) {
 				bool moved;
-				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
-					      vma);
 				/* See comment in move_ptes() */
 				if (need_rmap_locks)
-					anon_vma_lock_write(vma->anon_vma);
+					take_rmap_locks(vma);
 				moved = move_huge_pmd(vma, old_addr, new_addr,
 						      old_end, old_pmd, new_pmd);
 				if (need_rmap_locks)
-					anon_vma_unlock_write(vma->anon_vma);
+					drop_rmap_locks(vma);
 				if (moved) {
 					need_flush = true;
 					continue;