Diffstat (limited to 'mm/mremap.c')
-rw-r--r--	mm/mremap.c	66
1 file changed, 65 insertions(+), 1 deletion(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index def01d86e36f..3320616ed93f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -191,6 +191,52 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		drop_rmap_locks(vma);
 }
 
+#ifdef CONFIG_HAVE_MOVE_PMD
+static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+		unsigned long new_addr, unsigned long old_end,
+		pmd_t *old_pmd, pmd_t *new_pmd)
+{
+	spinlock_t *old_ptl, *new_ptl;
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t pmd;
+
+	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
+	    || old_end - old_addr < PMD_SIZE)
+		return false;
+
+	/*
+	 * The destination pmd shouldn't be established; free_pgtables()
+	 * should have released it.
+	 */
+	if (WARN_ON(!pmd_none(*new_pmd)))
+		return false;
+
+	/*
+	 * We don't have to worry about the ordering of src and dst
+	 * ptlocks because exclusive mmap_sem prevents deadlock.
+	 */
+	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
+	new_ptl = pmd_lockptr(mm, new_pmd);
+	if (new_ptl != old_ptl)
+		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+	/* Clear the pmd */
+	pmd = *old_pmd;
+	pmd_clear(old_pmd);
+
+	VM_BUG_ON(!pmd_none(*new_pmd));
+
+	/* Set the new pmd */
+	set_pmd_at(mm, new_addr, new_pmd, pmd);
+	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+	if (new_ptl != old_ptl)
+		spin_unlock(new_ptl);
+	spin_unlock(old_ptl);
+
+	return true;
+}
+#endif
+
 unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len,
@@ -235,8 +281,26 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			split_huge_pmd(vma, old_pmd, old_addr);
 			if (pmd_trans_unstable(old_pmd))
 				continue;
+		} else if (extent == PMD_SIZE) {
+#ifdef CONFIG_HAVE_MOVE_PMD
+			/*
+			 * If the extent is PMD-sized, try to speed the move by
+			 * moving at the PMD level if possible.
+			 */
+			bool moved;
+
+			if (need_rmap_locks)
+				take_rmap_locks(vma);
+			moved = move_normal_pmd(vma, old_addr, new_addr,
+					old_end, old_pmd, new_pmd);
+			if (need_rmap_locks)
+				drop_rmap_locks(vma);
+			if (moved)
+				continue;
+#endif
 		}
-		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
+
+		if (pte_alloc(new_vma->vm_mm, new_pmd))
 			break;
 		next = (new_addr + PMD_SIZE) & PMD_MASK;
 		if (extent > next - new_addr)
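
For context, the move_normal_pmd() fast path added above only fires when both the source and destination addresses are PMD-aligned and at least PMD_SIZE (typically 2 MiB on x86-64 with 4 KiB pages) remains to be moved; otherwise the code falls back to the existing PTE-by-PTE copy in move_ptes(). The userspace sketch below is not part of the patch: it is a minimal illustration of the kind of mremap() call that can benefit, assuming the kernel happens to place both the old and the new mapping on PMD-aligned addresses. The 64 MiB size is an arbitrary example.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* 64 MiB: a multiple of PMD_SIZE, chosen only for illustration. */
	size_t len = 64UL << 20;
	void *old, *new;

	/* Large anonymous mappings are often, but not always, PMD-aligned. */
	old = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Touch every page so there are page tables worth moving. */
	memset(old, 0xaa, len);

	/*
	 * Let the kernel pick the destination; with the patch applied the
	 * move may then proceed one PMD at a time instead of one PTE at a
	 * time, when the alignment conditions above are met.
	 */
	new = mremap(old, len, len, MREMAP_MAYMOVE);
	if (new == MAP_FAILED) {
		perror("mremap");
		return 1;
	}

	printf("moved %zu bytes from %p to %p\n", len, old, new);
	munmap(new, len);
	return 0;
}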