diff options
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 22 |
1 file changed, 15 insertions, 7 deletions
@@ -352,6 +352,11 @@ void __init anon_vma_init(void) | |||
352 | * The page might have been remapped to a different anon_vma or the anon_vma | 352 | * The page might have been remapped to a different anon_vma or the anon_vma |
353 | * returned may already be freed (and even reused). | 353 | * returned may already be freed (and even reused). |
354 | * | 354 | * |
355 | * In case it was remapped to a different anon_vma, the new anon_vma will be a | ||
356 | * child of the old anon_vma, and the anon_vma lifetime rules will therefore | ||
357 | * ensure that any anon_vma obtained from the page will still be valid for as | ||
358 | * long as we observe page_mapped() [ hence all those page_mapped() tests ]. | ||
359 | * | ||
355 | * All users of this function must be very careful when walking the anon_vma | 360 | * All users of this function must be very careful when walking the anon_vma |
356 | * chain and verify that the page in question is indeed mapped in it | 361 | * chain and verify that the page in question is indeed mapped in it |
357 | * [ something equivalent to page_mapped_in_vma() ]. | 362 | * [ something equivalent to page_mapped_in_vma() ]. |
@@ -405,6 +410,7 @@ out: | |||
405 | struct anon_vma *page_lock_anon_vma(struct page *page) | 410 | struct anon_vma *page_lock_anon_vma(struct page *page) |
406 | { | 411 | { |
407 | struct anon_vma *anon_vma = NULL; | 412 | struct anon_vma *anon_vma = NULL; |
413 | struct anon_vma *root_anon_vma; | ||
408 | unsigned long anon_mapping; | 414 | unsigned long anon_mapping; |
409 | 415 | ||
410 | rcu_read_lock(); | 416 | rcu_read_lock(); |
@@ -415,13 +421,15 @@ struct anon_vma *page_lock_anon_vma(struct page *page) | |||
415 | goto out; | 421 | goto out; |
416 | 422 | ||
417 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); | 423 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); |
418 | if (mutex_trylock(&anon_vma->root->mutex)) { | 424 | root_anon_vma = ACCESS_ONCE(anon_vma->root); |
425 | if (mutex_trylock(&root_anon_vma->mutex)) { | ||
419 | /* | 426 | /* |
420 | * If we observe a !0 refcount, then holding the lock ensures | 427 | * If the page is still mapped, then this anon_vma is still |
421 | * the anon_vma will not go away, see __put_anon_vma(). | 428 | * its anon_vma, and holding the mutex ensures that it will |
429 | * not go away, see anon_vma_free(). | ||
422 | */ | 430 | */ |
423 | if (!atomic_read(&anon_vma->refcount)) { | 431 | if (!page_mapped(page)) { |
424 | anon_vma_unlock(anon_vma); | 432 | mutex_unlock(&root_anon_vma->mutex); |
425 | anon_vma = NULL; | 433 | anon_vma = NULL; |
426 | } | 434 | } |
427 | goto out; | 435 | goto out; |
@@ -1014,7 +1022,7 @@ void do_page_add_anon_rmap(struct page *page, | |||
1014 | return; | 1022 | return; |
1015 | 1023 | ||
1016 | VM_BUG_ON(!PageLocked(page)); | 1024 | VM_BUG_ON(!PageLocked(page)); |
1017 | VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 1025 | /* address might be in next vma when migration races vma_adjust */ |
1018 | if (first) | 1026 | if (first) |
1019 | __page_set_anon_rmap(page, vma, address, exclusive); | 1027 | __page_set_anon_rmap(page, vma, address, exclusive); |
1020 | else | 1028 | else |
@@ -1709,7 +1717,7 @@ void hugepage_add_anon_rmap(struct page *page, | |||
1709 | 1717 | ||
1710 | BUG_ON(!PageLocked(page)); | 1718 | BUG_ON(!PageLocked(page)); |
1711 | BUG_ON(!anon_vma); | 1719 | BUG_ON(!anon_vma); |
1712 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 1720 | /* address might be in next vma when migration races vma_adjust */ |
1713 | first = atomic_inc_and_test(&page->_mapcount); | 1721 | first = atomic_inc_and_test(&page->_mapcount); |
1714 | if (first) | 1722 | if (first) |
1715 | __hugepage_set_anon_rmap(page, vma, address, 0); | 1723 | __hugepage_set_anon_rmap(page, vma, address, 0); |