Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index eaa7a09eb72e..526704e8215d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -182,7 +182,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
 	struct anon_vma_chain *avc, *pavc;
 
-	list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
+	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 		avc = anon_vma_chain_alloc();
 		if (!avc)
 			goto enomem_failure;
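
Why the walk direction changes: in this version of mm/rmap.c, anon_vma_chain_link() inserts each cloned entry at the head of the destination's same_vma list (it uses list_add()), so walking the source in reverse reproduces the parent's ordering in the child and keeps the oldest anon_vma at the tail, where the second hunk below expects to find it. Here is a minimal userspace sketch of that head-insertion/reverse-walk interaction, with plain arrays standing in for the kernel lists:

	#include <stdio.h>

	#define NVMAS 4

	int main(void)
	{
		/* Parent's same_vma chain, oldest anon_vma at the tail. */
		int src[NVMAS] = { 1, 2, 3, 4 };
		int dst[NVMAS];
		int n = 0;

		/* Walk the source chain in reverse... */
		for (int i = NVMAS - 1; i >= 0; i--) {
			/* ...inserting each clone at the head, as list_add() does. */
			for (int j = n; j > 0; j--)
				dst[j] = dst[j - 1];
			dst[0] = src[i];
			n++;
		}

		/*
		 * Prints "1 2 3 4": same order, oldest still last. A forward
		 * walk would have produced "4 3 2 1", putting the oldest
		 * anon_vma at the head instead.
		 */
		for (int i = 0; i < n; i++)
			printf("%d ", dst[i]);
		printf("\n");
		return 0;
	}
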
@@ -730,13 +730,29 @@ void page_move_anon_rmap(struct page *page,
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
+ * @exclusive:	the page is exclusively owned by the current process
  */
 static void __page_set_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
+
+	/*
+	 * If the page isn't exclusively mapped into this vma,
+	 * we must use the _oldest_ possible anon_vma for the
+	 * page mapping!
+	 *
+	 * So take the last AVC chain entry in the vma, which is
+	 * the deepest ancestor, and use the anon_vma from that.
+	 */
+	if (!exclusive) {
+		struct anon_vma_chain *avc;
+		avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
+		anon_vma = avc->anon_vma;
+	}
+
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
 	page->index = linear_page_index(vma, address);
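
The tail lookup above relies on two properties of the kernel's intrusive lists: list_head is circular, so vma->anon_vma_chain.prev is the last entry, and list_entry() is just container_of(), turning a pointer to the embedded same_vma node back into its enclosing anon_vma_chain. A self-contained userspace sketch of the same lookup (struct and helper names here are stand-ins, not the kernel's):

	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *prev, *next; };

	/* Recover the struct that embeds a given list node. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define list_entry(ptr, type, member) container_of(ptr, type, member)

	/* Stand-in for struct anon_vma_chain. */
	struct avc {
		int anon_vma_id;	/* stand-in for the anon_vma pointer */
		struct list_head same_vma;
	};

	/* Append a node at the tail of a circular list. */
	static void add_tail(struct list_head *node, struct list_head *head)
	{
		node->prev = head->prev;
		node->next = head;
		head->prev->next = node;
		head->prev = node;
	}

	int main(void)
	{
		struct list_head chain = { &chain, &chain };	/* empty, circular */
		struct avc a = { 1 }, b = { 2 }, c = { 3 };	/* c is the oldest */

		add_tail(&a.same_vma, &chain);
		add_tail(&b.same_vma, &chain);
		add_tail(&c.same_vma, &chain);

		/* chain.prev is the last entry: the deepest ancestor. */
		struct avc *oldest = list_entry(chain.prev, struct avc, same_vma);
		printf("oldest anon_vma id: %d\n", oldest->anon_vma_id);	/* 3 */
		return 0;
	}
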
@@ -791,7 +807,7 @@ void page_add_anon_rmap(struct page *page,
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (first)
-		__page_set_anon_rmap(page, vma, address);
+		__page_set_anon_rmap(page, vma, address, 0);
 	else
 		__page_check_anon_rmap(page, vma, address);
 }
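
Passing 0 here is deliberate: page_add_anon_rmap() handles pages that may already be mapped elsewhere (inherited across fork(), for example), so __page_set_anon_rmap() has to fall back to the oldest anon_vma on the chain. page_add_new_anon_rmap() below passes 1, since a freshly allocated page can only be mapped by this vma, and vma->anon_vma is then the right owner.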
@@ -813,7 +829,7 @@ void page_add_new_anon_rmap(struct page *page,
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
 	__inc_zone_page_state(page, NR_ANON_PAGES);
-	__page_set_anon_rmap(page, vma, address);
+	__page_set_anon_rmap(page, vma, address, 1);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 	else
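
Not changed by this patch, but central to the context lines in the second hunk: page->mapping does double duty, and PAGE_MAPPING_ANON (bit 0, always clear in a word-aligned address_space pointer) tags the field as holding an anon_vma rather than a file mapping. A userspace sketch of that tag-and-recover trick (the struct is a stand-in, not the kernel's):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_MAPPING_ANON 1UL

	struct anon_vma { int id; };	/* stand-in; aligned, so bit 0 is clear */

	int main(void)
	{
		struct anon_vma av = { 42 };

		/* Tag: store the anon_vma with bit 0 set, as the hunk does. */
		uintptr_t mapping = (uintptr_t)&av + PAGE_MAPPING_ANON;

		/*
		 * Untag: PageAnon()-style test, then strip the bit to get
		 * the anon_vma back.
		 */
		if (mapping & PAGE_MAPPING_ANON) {
			struct anon_vma *anon_vma =
				(struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
			printf("anon mapping, id %d\n", anon_vma->id);
		}
		return 0;
	}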