Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  42
1 files changed, 32 insertions(+), 10 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 87b9e8ad4509..92e6757f196e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -316,7 +316,7 @@ void __init anon_vma_init(void)
  */
 struct anon_vma *page_lock_anon_vma(struct page *page)
 {
-	struct anon_vma *anon_vma;
+	struct anon_vma *anon_vma, *root_anon_vma;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
@@ -327,8 +327,21 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	anon_vma_lock(anon_vma);
-	return anon_vma;
+	root_anon_vma = ACCESS_ONCE(anon_vma->root);
+	spin_lock(&root_anon_vma->lock);
+
+	/*
+	 * If this page is still mapped, then its anon_vma cannot have been
+	 * freed.  But if it has been unmapped, we have no security against
+	 * the anon_vma structure being freed and reused (for another anon_vma:
+	 * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
+	 * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
+	 * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+	 */
+	if (page_mapped(page))
+		return anon_vma;
+
+	spin_unlock(&root_anon_vma->lock);
 out:
 	rcu_read_unlock();
 	return NULL;
@@ -368,7 +381,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	if (PageAnon(page)) {
-		if (vma->anon_vma->root != page_anon_vma(page)->root)
+		struct anon_vma *page__anon_vma = page_anon_vma(page);
+		/*
+		 * Note: swapoff's unuse_vma() is more efficient with this
+		 * check, and needs it to match anon_vma when KSM is active.
+		 */
+		if (!vma->anon_vma || !page__anon_vma ||
+		    vma->anon_vma->root != page__anon_vma->root)
 			return -EFAULT;
 	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
 		if (!vma->vm_file ||
@@ -1551,13 +1570,14 @@ static void __hugepage_set_anon_rmap(struct page *page,
 			struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
+
 	BUG_ON(!anon_vma);
-	if (!exclusive) {
-		struct anon_vma_chain *avc;
-		avc = list_entry(vma->anon_vma_chain.prev,
-				 struct anon_vma_chain, same_vma);
-		anon_vma = avc->anon_vma;
-	}
+
+	if (PageAnon(page))
+		return;
+	if (!exclusive)
+		anon_vma = anon_vma->root;
+
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
 	page->index = linear_page_index(vma, address);
@@ -1568,6 +1588,8 @@ void hugepage_add_anon_rmap(struct page *page,
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	int first;
+
+	BUG_ON(!PageLocked(page));
 	BUG_ON(!anon_vma);
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	first = atomic_inc_and_test(&page->_mapcount);
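
For reference, a minimal caller sketch (not part of the patch) of how the reworked page_lock_anon_vma()/page_unlock_anon_vma() pair is meant to be used after this change: the lock is now taken on anon_vma->root, and a non-NULL anon_vma is only returned while the page is still mapped. The function walk_anon_rmap() below is hypothetical and for illustration only; page_lock_anon_vma(), page_unlock_anon_vma() and page_mapped() are the real interfaces referenced in the diff above.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical rmap walker, illustrating the expected locking pattern. */
static int walk_anon_rmap(struct page *page)
{
	struct anon_vma *anon_vma;

	/*
	 * page_lock_anon_vma() takes rcu_read_lock() and then
	 * anon_vma->root->lock; if the page has meanwhile been unmapped
	 * it releases both and returns NULL, since the anon_vma may
	 * already have been freed and reused.
	 */
	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return 0;

	/* ... walk the vmas chained off anon_vma here ... */

	/* Counterpart: unlocks anon_vma->root->lock, then rcu_read_unlock(). */
	page_unlock_anon_vma(anon_vma);
	return 1;
}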