Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index fcd593c9c997..07fc94758799 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 				goto out_enomem_free_avc;
 			allocated = anon_vma;
 		}
-		spin_lock(&anon_vma->lock);
 
+		spin_lock(&anon_vma->lock);
 		/* page_table_lock to protect against threads */
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
@@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 			list_add(&avc->same_vma, &vma->anon_vma_chain);
 			list_add(&avc->same_anon_vma, &anon_vma->head);
 			allocated = NULL;
+			avc = NULL;
 		}
 		spin_unlock(&mm->page_table_lock);
-
 		spin_unlock(&anon_vma->lock);
-		if (unlikely(allocated)) {
+
+		if (unlikely(allocated))
 			anon_vma_free(allocated);
+		if (unlikely(avc))
 			anon_vma_chain_free(avc);
-		}
 	}
 	return 0;
 
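The restructured cleanup above also fixes a leak: before this change, anon_vma_chain_free(avc) was only reachable inside the if (unlikely(allocated)) block, so a thread that found a mergeable anon_vma (allocated == NULL) but lost the race to set vma->anon_vma never freed its chain entry. Clearing avc once it is linked lets the two objects be freed independently. Below is a minimal userspace sketch of this allocate-then-recheck pattern, with pthread mutexes standing in for the spinlocks and malloc for the slab caches (all names hypothetical):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_anon;	/* stands in for vma->anon_vma */
static void *shared_avc;	/* stands in for the linked chain entry */

static int prepare(void)
{
	void *anon = malloc(64);	/* speculative anon_vma */
	void *avc = malloc(32);		/* speculative anon_vma_chain */

	if (!anon || !avc)
		goto out_enomem;

	pthread_mutex_lock(&lock);
	if (!shared_anon) {		/* re-check under the lock */
		shared_anon = anon;	/* we won: both objects consumed */
		shared_avc = avc;
		anon = NULL;		/* the avc = NULL trick from the patch */
		avc = NULL;
	}
	pthread_mutex_unlock(&lock);

	/* If we lost the race, both speculative objects are unused and
	 * freed here; if we won, they were NULLed and free() is a no-op. */
	free(anon);
	free(avc);
	return 0;

out_enomem:
	free(anon);
	free(avc);
	return -1;
}

static void *worker(void *arg)
{
	(void)arg;
	prepare();
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	free(shared_anon);
	free(shared_avc);
	return 0;
}
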
@@ -182,7 +183,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
 	struct anon_vma_chain *avc, *pavc;
 
-	list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
+	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 		avc = anon_vma_chain_alloc();
 		if (!avc)
 			goto enomem_failure;
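
Walking the source chain in reverse matters because anon_vma_chain_link() adds each new entry with list_add(), which inserts at the head: prepending while iterating backwards reproduces the source order in the copy, so the root anon_vma stays at the tail of every descendant's chain. A toy sketch of that order preservation, using a plain singly linked prepend in place of the kernel list API (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct node { int depth; struct node *next; };

static struct node *prepend(struct node *head, int depth)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		exit(1);
	n->depth = depth;
	n->next = head;
	return n;
}

int main(void)
{
	/* Source chain as anon_vma_clone() sees it: newest anon_vma
	 * first, the root (depth 0) at the tail. */
	int src[] = { 2, 1, 0 };
	struct node *dst = NULL;

	/* Walk the source in reverse and prepend each entry, mirroring
	 * list_for_each_entry_reverse() feeding list_add(). */
	for (int i = 2; i >= 0; i--)
		dst = prepend(dst, src[i]);

	/* Prints "2 1 0": source order preserved, root still last. */
	for (struct node *n = dst; n; n = n->next)
		printf("%d ", n->depth);
	printf("\n");
	return 0;
}
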
@@ -232,6 +233,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
  out_error_free_anon_vma:
 	anon_vma_free(anon_vma);
  out_error:
+	unlink_anon_vmas(vma);
 	return -ENOMEM;
 }
 
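The added unlink_anon_vmas() call matters because anon_vma_fork() can fail partway through, after anon_vma_clone() has already linked some chain entries onto the new vma; the error path must unwind whatever was linked or those entries leak. A generic userspace sketch of that unwind-on-error rule, with hypothetical helpers standing in for the rmap functions:

#include <stdlib.h>

struct link { struct link *next; };

static struct link *chain;

static void unlink_all(void)	/* plays the role of unlink_anon_vmas() */
{
	while (chain) {
		struct link *l = chain;

		chain = l->next;
		free(l);
	}
}

static int clone_chain(int count)	/* plays the role of anon_vma_fork() */
{
	for (int i = 0; i < count; i++) {
		struct link *l = malloc(sizeof(*l));

		if (!l)
			goto out_error;	/* earlier links already made */
		l->next = chain;
		chain = l;
	}
	return 0;

out_error:
	unlink_all();	/* the fix: unwind partial state before failing */
	return -1;
}

int main(void)
{
	int ret = clone_chain(3);

	unlink_all();
	return ret ? 1 : 0;
}
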
@@ -729,13 +731,29 @@ void page_move_anon_rmap(struct page *page,
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
+ * @exclusive:	the page is exclusively owned by the current process
  */
 static void __page_set_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
+
+	/*
+	 * If the page isn't exclusively mapped into this vma,
+	 * we must use the _oldest_ possible anon_vma for the
+	 * page mapping!
+	 *
+	 * So take the last AVC chain entry in the vma, which is
+	 * the deepest ancestor, and use the anon_vma from that.
+	 */
+	if (!exclusive) {
+		struct anon_vma_chain *avc;
+		avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
+		anon_vma = avc->anon_vma;
+	}
+
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
 	page->index = linear_page_index(vma, address);
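
The new !exclusive branch leans on an invariant of the circular list_head: head->prev is the tail, and the ordering kept by anon_vma_prepare() and the reverse clone above leaves the oldest anon_vma there, so list_entry(vma->anon_vma_chain.prev, ...) yields the deepest ancestor. A userspace sketch, assuming a minimal re-implementation of the kernel's list_head/list_entry machinery:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Insert at the head, as the kernel's list_add() does. */
static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

/* Hypothetical stand-in for struct anon_vma_chain. */
struct avc { int depth; struct list_head same_vma; };

int main(void)
{
	struct list_head chain = LIST_HEAD_INIT(chain);
	struct avc root = { .depth = 0 };	/* oldest: linked first */
	struct avc child = { .depth = 1 };	/* newest: linked last */

	list_add(&root.same_vma, &chain);
	list_add(&child.same_vma, &chain);

	/* head->prev is the tail of the circular list: the root. */
	struct avc *oldest = list_entry(chain.prev, struct avc, same_vma);
	printf("oldest depth = %d\n", oldest->depth);	/* prints 0 */
	return 0;
}
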
@@ -790,7 +808,7 @@ void page_add_anon_rmap(struct page *page,
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (first)
-		__page_set_anon_rmap(page, vma, address);
+		__page_set_anon_rmap(page, vma, address, 0);
 	else
 		__page_check_anon_rmap(page, vma, address);
 }
@@ -812,7 +830,7 @@ void page_add_new_anon_rmap(struct page *page,
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
 	__inc_zone_page_state(page, NR_ANON_PAGES);
-	__page_set_anon_rmap(page, vma, address);
+	__page_set_anon_rmap(page, vma, address, 1);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 	else