author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2017-02-24 17:58:16 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-24 20:46:55 -0500
commit	3fe87967c536e828bf1ea14b3ec3827d1101152e (patch)
tree	841110b9e7ce1d1c4ea3ef213e3c3e4f4a349e46 /mm
parent	d53a8b49a626fdfce4390710da6d04b4314db25f (diff)
mm: convert remove_migration_pte() to use page_vma_mapped_walk()
remove_migration_pte() also can easily be converted to
page_vma_mapped_walk().

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20170129173858.45174-13-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
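The shape of the conversion is easier to see in isolation than in the diff
below. What follows is a minimal sketch of the caller pattern that
page_vma_mapped_walk() expects, assuming only the struct fields visible in
the diff; walk_one_vma() and handle_one_pte() are hypothetical names used
for illustration, not kernel functions:

	/*
	 * Minimal sketch of the page_vma_mapped_walk() caller pattern,
	 * based only on the fields visible in the diff below.
	 * walk_one_vma() and handle_one_pte() are hypothetical.
	 */
	static void walk_one_vma(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr)
	{
		struct page_vma_mapped_walk pvmw = {
			.page = page,		/* page whose mappings to locate */
			.vma = vma,		/* VMA to search in */
			.address = addr,	/* address reported by the rmap walk */
			.flags = PVMW_SYNC | PVMW_MIGRATION,
		};

		/*
		 * Each true return leaves pvmw.pte pointing at a matching
		 * entry with the page table lock held; the final false
		 * return drops the lock itself, so the unlock:/out: labels
		 * of the old code are no longer needed.
		 */
		while (page_vma_mapped_walk(&pvmw))
			handle_one_pte(&pvmw);	/* hypothetical per-PTE work */
	}

PVMW_MIGRATION makes the walker match migration entries rather than present
PTEs, and PVMW_SYNC makes it take the PTE lock up front instead of peeking
at the entry first, preserving the old code's deliberate refusal to peek
before locking (see the deleted comment about racing mremap's move_ptes()).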
Diffstat (limited to 'mm')
-rw-r--r--	mm/migrate.c	102
1 file changed, 41 insertions(+), 61 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 6807174e0715..2c63ac06791b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -193,82 +193,62 @@ void putback_movable_pages(struct list_head *l)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				 unsigned long addr, void *old)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	struct page_vma_mapped_walk pvmw = {
+		.page = old,
+		.vma = vma,
+		.address = addr,
+		.flags = PVMW_SYNC | PVMW_MIGRATION,
+	};
+	struct page *new;
+	pte_t pte;
 	swp_entry_t entry;
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
 
-	if (unlikely(PageHuge(new))) {
-		ptep = huge_pte_offset(mm, addr);
-		if (!ptep)
-			goto out;
-		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
-	} else {
-		pmd = mm_find_pmd(mm, addr);
-		if (!pmd)
-			goto out;
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	while (page_vma_mapped_walk(&pvmw)) {
+		new = page - pvmw.page->index +
+			linear_page_index(vma, pvmw.address);
 
-		ptep = pte_offset_map(pmd, addr);
+		get_page(new);
+		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
+		if (pte_swp_soft_dirty(*pvmw.pte))
+			pte = pte_mksoft_dirty(pte);
 
-		/*
-		 * Peek to check is_swap_pte() before taking ptlock?  No, we
-		 * can race mremap's move_ptes(), which skips anon_vma lock.
-		 */
-
-		ptl = pte_lockptr(mm, pmd);
-	}
-
-	spin_lock(ptl);
-	pte = *ptep;
-	if (!is_swap_pte(pte))
-		goto unlock;
-
-	entry = pte_to_swp_entry(pte);
-
-	if (!is_migration_entry(entry) ||
-	    migration_entry_to_page(entry) != old)
-		goto unlock;
-
-	get_page(new);
-	pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
-	if (pte_swp_soft_dirty(*ptep))
-		pte = pte_mksoft_dirty(pte);
-
-	/* Recheck VMA as permissions can change since migration started */
-	if (is_write_migration_entry(entry))
-		pte = maybe_mkwrite(pte, vma);
+		/*
+		 * Recheck VMA as permissions can change since migration started
+		 */
+		entry = pte_to_swp_entry(*pvmw.pte);
+		if (is_write_migration_entry(entry))
+			pte = maybe_mkwrite(pte, vma);
 
 #ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(new)) {
-		pte = pte_mkhuge(pte);
-		pte = arch_make_huge_pte(pte, vma, new, 0);
-	}
+		if (PageHuge(new)) {
+			pte = pte_mkhuge(pte);
+			pte = arch_make_huge_pte(pte, vma, new, 0);
+		}
 #endif
-	flush_dcache_page(new);
-	set_pte_at(mm, addr, ptep, pte);
+		flush_dcache_page(new);
+		set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 
-	if (PageHuge(new)) {
-		if (PageAnon(new))
-			hugepage_add_anon_rmap(new, vma, addr);
+		if (PageHuge(new)) {
+			if (PageAnon(new))
+				hugepage_add_anon_rmap(new, vma, pvmw.address);
+			else
+				page_dup_rmap(new, true);
+		} else if (PageAnon(new))
+			page_add_anon_rmap(new, vma, pvmw.address, false);
 		else
-			page_dup_rmap(new, true);
-	} else if (PageAnon(new))
-		page_add_anon_rmap(new, vma, addr, false);
-	else
-		page_add_file_rmap(new, false);
+			page_add_file_rmap(new, false);
 
-	if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
-		mlock_vma_page(new);
+		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
+			mlock_vma_page(new);
+
+		/* No need to invalidate - it was non-present before */
+		update_mmu_cache(vma, pvmw.address, pvmw.pte);
+	}
 
-	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, addr, ptep);
-unlock:
-	pte_unmap_unlock(ptep, ptl);
-out:
 	return SWAP_AGAIN;
 }
 
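One line of the new loop is worth unpacking: the computation of new. The
walk can now report every mapped subpage of a compound page, so the matching
subpage of the new page must be derived from the reported address. Below is
a commented restatement of the line from the diff above, under the
assumption (which migration is understood to preserve) that the new page
inherits the old page's ->index:

	/*
	 * page is the new (head) page handed in by the rmap walk and
	 * pvmw.page is the old page being unmapped. Since the new page
	 * keeps the old page's ->index, the offset of the current
	 * address within the mapping, linear_page_index(vma,
	 * pvmw.address), minus the head page's offset, pvmw.page->index,
	 * is the subpage offset; adding it to page selects the
	 * corresponding subpage of the new page.
	 */
	new = page - pvmw.page->index + linear_page_index(vma, pvmw.address);

The VM_BUG_ON_PAGE(PageTail(page), page) at the top of the function guards
the same assumption: the callback must be handed a head page for this
arithmetic to make sense.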