Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index eaa7a09eb72e..07fc94758799 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 				goto out_enomem_free_avc;
 			allocated = anon_vma;
 		}
-		spin_lock(&anon_vma->lock);
 
+		spin_lock(&anon_vma->lock);
 		/* page_table_lock to protect against threads */
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
@@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 			list_add(&avc->same_vma, &vma->anon_vma_chain);
 			list_add(&avc->same_anon_vma, &anon_vma->head);
 			allocated = NULL;
+			avc = NULL;
 		}
 		spin_unlock(&mm->page_table_lock);
-
 		spin_unlock(&anon_vma->lock);
-		if (unlikely(allocated)) {
+
+		if (unlikely(allocated))
 			anon_vma_free(allocated);
+		if (unlikely(avc))
 			anon_vma_chain_free(avc);
-		}
 	}
 	return 0;
 
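Why the error path above is split in two: in the old code, anon_vma_chain_free(avc) ran only when `allocated` was non-NULL. But `allocated` stays NULL when a mergeable anon_vma was found, and if another thread then won the race to set vma->anon_vma, the unused avc was never freed. Clearing each pointer individually at the point it is consumed, and freeing each leftover on its own, closes that hole. Below is a minimal userspace sketch of the pattern; the names (install_ctx, published, struct ctx/link) are hypothetical illustrations, not kernel API:

#include <pthread.h>
#include <stdlib.h>

struct ctx  { int dummy; };
struct link { int dummy; };

static struct ctx *published;		/* plays the role of vma->anon_vma */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int install_ctx(void)
{
	/* Allocate optimistically, before taking the lock. */
	struct ctx  *allocated = malloc(sizeof(*allocated));
	struct link *l = malloc(sizeof(*l));

	if (!allocated || !l)
		goto out;

	pthread_mutex_lock(&lock);
	if (!published) {
		/* We won the race: both allocations are consumed. */
		published = allocated;
		allocated = NULL;
		l = NULL;
	}
	pthread_mutex_unlock(&lock);
out:
	/*
	 * Free each leftover independently, as the patch does:
	 * either pointer may be NULL while the other is not.
	 * free(NULL) is a safe no-op.
	 */
	free(allocated);
	free(l);
	return published ? 0 : -1;
}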
@@ -182,7 +183,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
 	struct anon_vma_chain *avc, *pavc;
 
-	list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
+	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 		avc = anon_vma_chain_alloc();
 		if (!avc)
 			goto enomem_failure;
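The switch to list_for_each_entry_reverse() matters because the clone loop links each new entry at the *front* of dst's chain (via list_add(), not list_add_tail()): walking src back-to-front while pushing to the front reproduces src's order in dst, so the oldest anon_vma stays at the tail, where __page_set_anon_rmap() below looks for it. A standalone sketch of that ordering argument, using a hand-rolled circular list instead of <linux/list.h>:

#include <stdio.h>

struct node { int id; struct node *prev, *next; };

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

/* Insert right after head, i.e. push-front, like the kernel's list_add(). */
static void push_front(struct node *head, struct node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

int main(void)
{
	struct node src_head, dst_head, src[3], dst[3], *p;
	int i;

	list_init(&src_head);
	list_init(&dst_head);
	for (i = 0; i < 3; i++) {
		src[i].id = i;
		push_front(&src_head, &src[i]);	/* src order: 2 1 0, tail = 0 */
	}

	/* Reverse walk of src + push-front into dst preserves src's order. */
	i = 0;
	for (p = src_head.prev; p != &src_head; p = p->prev) {
		dst[i].id = p->id;
		push_front(&dst_head, &dst[i]);
		i++;
	}

	for (p = dst_head.next; p != &dst_head; p = p->next)
		printf("%d ", p->id);	/* prints "2 1 0": tail is still 0 */
	printf("\n");
	return 0;
}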
@@ -730,13 +731,29 @@ void page_move_anon_rmap(struct page *page,
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
+ * @exclusive:	the page is exclusively owned by the current process
  */
 static void __page_set_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
+
+	/*
+	 * If the page isn't exclusively mapped into this vma,
+	 * we must use the _oldest_ possible anon_vma for the
+	 * page mapping!
+	 *
+	 * So take the last AVC chain entry in the vma, which is
+	 * the deepest ancestor, and use the anon_vma from that.
+	 */
+	if (!exclusive) {
+		struct anon_vma_chain *avc;
+		avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
+		anon_vma = avc->anon_vma;
+	}
+
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
 	page->index = linear_page_index(vma, address);
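The new !exclusive branch leans on two properties: on a circular list, the head's ->prev pointer is the last entry, and list_entry() (a container_of() wrapper) converts a pointer to the embedded same_vma member back into the struct anon_vma_chain that contains it. A userspace approximation, with hypothetical struct names standing in for the kernel types:

#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

struct chain_entry {
	int generation;			/* stand-in for the anon_vma */
	struct list_node same_vma;	/* embedded list linkage */
};

/* Same pointer arithmetic as the kernel's container_of()/list_entry(). */
#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct list_node head;
	struct chain_entry young = { .generation = 2 };
	struct chain_entry old   = { .generation = 0 };

	/* Build head -> young -> old -> head by hand. */
	head.next = &young.same_vma;
	young.same_vma.prev = &head;
	young.same_vma.next = &old.same_vma;
	old.same_vma.prev = &young.same_vma;
	old.same_vma.next = &head;
	head.prev = &old.same_vma;

	/* head.prev is the tail of the list: the oldest entry. */
	struct chain_entry *oldest =
		my_container_of(head.prev, struct chain_entry, same_vma);
	printf("oldest generation: %d\n", oldest->generation);	/* prints 0 */
	return 0;
}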
@@ -791,7 +808,7 @@ void page_add_anon_rmap(struct page *page,
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (first)
-		__page_set_anon_rmap(page, vma, address);
+		__page_set_anon_rmap(page, vma, address, 0);
 	else
 		__page_check_anon_rmap(page, vma, address);
 }
@@ -813,7 +830,7 @@ void page_add_new_anon_rmap(struct page *page,
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
 	__inc_zone_page_state(page, NR_ANON_PAGES);
-	__page_set_anon_rmap(page, vma, address);
+	__page_set_anon_rmap(page, vma, address, 1);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 	else
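Taken together, the two call-site changes encode the exclusivity rule: page_add_new_anon_rmap() runs only for a freshly allocated page that no other process can map yet, so it passes exclusive=1 and the page keeps the vma's own anon_vma; page_add_anon_rmap() can run for a page that may already be shared across fork, so it passes exclusive=0 and __page_set_anon_rmap() falls back to the oldest anon_vma on the chain, the one reachable from every process that could be mapping the page.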