diff options
Diffstat (limited to 'mm/rmap.c')
| -rw-r--r-- | mm/rmap.c | 34 |
1 file changed, 25 insertions, 9 deletions
| @@ -316,7 +316,7 @@ void __init anon_vma_init(void) | |||
| 316 | */ | 316 | */ |
| 317 | struct anon_vma *page_lock_anon_vma(struct page *page) | 317 | struct anon_vma *page_lock_anon_vma(struct page *page) |
| 318 | { | 318 | { |
| 319 | struct anon_vma *anon_vma; | 319 | struct anon_vma *anon_vma, *root_anon_vma; |
| 320 | unsigned long anon_mapping; | 320 | unsigned long anon_mapping; |
| 321 | 321 | ||
| 322 | rcu_read_lock(); | 322 | rcu_read_lock(); |
| @@ -327,8 +327,21 @@ struct anon_vma *page_lock_anon_vma(struct page *page) | |||
| 327 | goto out; | 327 | goto out; |
| 328 | 328 | ||
| 329 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); | 329 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); |
| 330 | anon_vma_lock(anon_vma); | 330 | root_anon_vma = ACCESS_ONCE(anon_vma->root); |
| 331 | return anon_vma; | 331 | spin_lock(&root_anon_vma->lock); |
| 332 | |||
| 333 | /* | ||
| 334 | * If this page is still mapped, then its anon_vma cannot have been | ||
| 335 | * freed. But if it has been unmapped, we have no security against | ||
| 336 | * the anon_vma structure being freed and reused (for another anon_vma: | ||
| 337 | * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot | ||
| 338 | * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting | ||
| 339 | * anon_vma->root before page_unlock_anon_vma() is called to unlock. | ||
| 340 | */ | ||
| 341 | if (page_mapped(page)) | ||
| 342 | return anon_vma; | ||
| 343 | |||
| 344 | spin_unlock(&root_anon_vma->lock); | ||
| 332 | out: | 345 | out: |
| 333 | rcu_read_unlock(); | 346 | rcu_read_unlock(); |
| 334 | return NULL; | 347 | return NULL; |
| @@ -1551,13 +1564,14 @@ static void __hugepage_set_anon_rmap(struct page *page, | |||
| 1551 | struct vm_area_struct *vma, unsigned long address, int exclusive) | 1564 | struct vm_area_struct *vma, unsigned long address, int exclusive) |
| 1552 | { | 1565 | { |
| 1553 | struct anon_vma *anon_vma = vma->anon_vma; | 1566 | struct anon_vma *anon_vma = vma->anon_vma; |
| 1567 | |||
| 1554 | BUG_ON(!anon_vma); | 1568 | BUG_ON(!anon_vma); |
| 1555 | if (!exclusive) { | 1569 | |
| 1556 | struct anon_vma_chain *avc; | 1570 | if (PageAnon(page)) |
| 1557 | avc = list_entry(vma->anon_vma_chain.prev, | 1571 | return; |
| 1558 | struct anon_vma_chain, same_vma); | 1572 | if (!exclusive) |
| 1559 | anon_vma = avc->anon_vma; | 1573 | anon_vma = anon_vma->root; |
| 1560 | } | 1574 | |
| 1561 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | 1575 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
| 1562 | page->mapping = (struct address_space *) anon_vma; | 1576 | page->mapping = (struct address_space *) anon_vma; |
| 1563 | page->index = linear_page_index(vma, address); | 1577 | page->index = linear_page_index(vma, address); |
| @@ -1568,6 +1582,8 @@ void hugepage_add_anon_rmap(struct page *page, | |||
| 1568 | { | 1582 | { |
| 1569 | struct anon_vma *anon_vma = vma->anon_vma; | 1583 | struct anon_vma *anon_vma = vma->anon_vma; |
| 1570 | int first; | 1584 | int first; |
| 1585 | |||
| 1586 | BUG_ON(!PageLocked(page)); | ||
| 1571 | BUG_ON(!anon_vma); | 1587 | BUG_ON(!anon_vma); |
| 1572 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 1588 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
| 1573 | first = atomic_inc_and_test(&page->_mapcount); | 1589 | first = atomic_inc_and_test(&page->_mapcount); |
