Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 4bad3267537a..07fc94758799 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 				goto out_enomem_free_avc;
 			allocated = anon_vma;
 		}
-		spin_lock(&anon_vma->lock);
 
+		spin_lock(&anon_vma->lock);
 		/* page_table_lock to protect against threads */
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
@@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 			list_add(&avc->same_vma, &vma->anon_vma_chain);
 			list_add(&avc->same_anon_vma, &anon_vma->head);
 			allocated = NULL;
+			avc = NULL;
 		}
 		spin_unlock(&mm->page_table_lock);
-
 		spin_unlock(&anon_vma->lock);
-		if (unlikely(allocated)) {
+
+		if (unlikely(allocated))
 			anon_vma_free(allocated);
+		if (unlikely(avc))
 			anon_vma_chain_free(avc);
-		}
 	}
 	return 0;
 
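The two hunks above close a small leak in anon_vma_prepare()'s cleanup path: previously, if a mergeable anon_vma was found (so allocated stayed NULL) but another thread won the race to set vma->anon_vma, the unused avc was never freed. The fix clears avc once it has been linked into the lists, then checks allocated and avc independently on exit. Below is a minimal user-space sketch (not kernel code) of this ownership-transfer pattern; the names (struct node, try_publish, prepare) are hypothetical stand-ins for the kernel's anon_vma/anon_vma_chain machinery:

#include <stdlib.h>

struct node {
	struct node *next;
};

static struct node *shared_head;	/* plays the role of vma->anon_vma */

/* Link n into the shared structure; return 1 if it was consumed. */
static int try_publish(struct node *n)
{
	/* a lock would be held here, like mm->page_table_lock above */
	if (!shared_head) {
		n->next = NULL;
		shared_head = n;
		return 1;
	}
	return 0;	/* raced with another thread; caller must free n */
}

static int prepare(void)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;

	if (try_publish(n))
		n = NULL;	/* ownership transferred: don't free below */

	if (n)			/* mirrors "if (unlikely(avc))" */
		free(n);
	return 0;
}

int main(void)
{
	/* second call loses the "race" and exercises the free path */
	return prepare() || prepare();
}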
@@ -730,23 +731,28 @@ void page_move_anon_rmap(struct page *page,
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
+ * @exclusive:	the page is exclusively owned by the current process
  */
 static void __page_set_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
-	struct anon_vma_chain *avc;
-	struct anon_vma *anon_vma;
+	struct anon_vma *anon_vma = vma->anon_vma;
 
-	BUG_ON(!vma->anon_vma);
+	BUG_ON(!anon_vma);
 
 	/*
-	 * We must use the _oldest_ possible anon_vma for the page mapping!
+	 * If the page isn't exclusively mapped into this vma,
+	 * we must use the _oldest_ possible anon_vma for the
+	 * page mapping!
 	 *
-	 * So take the last AVC chain entry in the vma, which is the deepest
-	 * ancestor, and use the anon_vma from that.
+	 * So take the last AVC chain entry in the vma, which is
+	 * the deepest ancestor, and use the anon_vma from that.
 	 */
-	avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
-	anon_vma = avc->anon_vma;
+	if (!exclusive) {
+		struct anon_vma_chain *avc;
+		avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
+		anon_vma = avc->anon_vma;
+	}
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
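For reference, here is __page_set_anon_rmap() as it reads with the hunk above applied, reconstructed directly from the diff: exclusive pages keep vma->anon_vma (the newest anon_vma), while non-exclusive pages still fall back to the oldest ancestor's anon_vma at the tail of the AVC chain. The remainder of the function falls outside the hunk's context and is not shown.

static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 *
	 * So take the last AVC chain entry in the vma, which is
	 * the deepest ancestor, and use the anon_vma from that.
	 */
	if (!exclusive) {
		struct anon_vma_chain *avc;
		avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
		anon_vma = avc->anon_vma;
	}

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	/* ... rest of the function is outside the hunk's context ... */
}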
@@ -802,7 +808,7 @@ void page_add_anon_rmap(struct page *page,
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (first)
-		__page_set_anon_rmap(page, vma, address);
+		__page_set_anon_rmap(page, vma, address, 0);
 	else
 		__page_check_anon_rmap(page, vma, address);
 }
@@ -824,7 +830,7 @@ void page_add_new_anon_rmap(struct page *page,
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
 	__inc_zone_page_state(page, NR_ANON_PAGES);
-	__page_set_anon_rmap(page, vma, address);
+	__page_set_anon_rmap(page, vma, address, 1);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 	else
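Taken together, the two call-site hunks give the new parameter its meaning: page_add_new_anon_rmap() handles brand-new pages that no other process can map yet, so it can pass exclusive = 1 and attach the page to the vma's own (newest) anon_vma, while page_add_anon_rmap() may be dealing with a page that is, or was, shared across fork, so it conservatively passes 0 and keeps the oldest-ancestor behaviour.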