Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  37
1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 92e6757f196e..1a8bf76bfd03 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -80,7 +80,7 @@ static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
 	return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
 }
 
-void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
+static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
 {
 	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
 }
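
With anon_vma_chain_free() now static, all of its callers live in mm/rmap.c. The companion change is not visible here, since this view is limited to mm/rmap.c, but it would presumably be the removal of the extern declaration from include/linux/rmap.h, along the lines of:

void anon_vma_chain_free(struct anon_vma_chain *);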
@@ -314,7 +314,7 @@ void __init anon_vma_init(void)
  * Getting a lock on a stable anon_vma from a page off the LRU is
  * tricky: page_lock_anon_vma rely on RCU to guard against the races.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *__page_lock_anon_vma(struct page *page)
 {
 	struct anon_vma *anon_vma, *root_anon_vma;
 	unsigned long anon_mapping;
@@ -348,6 +348,8 @@ out:
 }
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
+	__releases(&anon_vma->root->lock)
+	__releases(RCU)
 {
 	anon_vma_unlock(anon_vma);
 	rcu_read_unlock();
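
The rename to __page_lock_anon_vma() follows the usual pattern for making conditionally-taken locks visible to sparse: the function acquires the RCU read lock and the root anon_vma lock only when it finds a stable anon_vma, which an annotation on the function itself cannot express, so the annotation moves into a trivial header wrapper. A plausible sketch of that wrapper in include/linux/rmap.h, using the kernel's __cond_lock() helper (the wrapper body is an assumption; the header is outside this diff):

struct anon_vma *__page_lock_anon_vma(struct page *page);

static inline struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;

	/* Tell sparse the RCU read lock is held iff the call succeeds. */
	__cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));

	/* Likewise for the root anon_vma lock; (void) keeps gcc quiet. */
	(void) __cond_lock(&anon_vma->root->lock, anon_vma);

	return anon_vma;
}

page_unlock_anon_vma() needs no wrapper: it releases both unconditionally, so the two added __releases() lines annotate it directly.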
@@ -407,7 +409,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
  *
  * On success returns with pte mapped and locked.
  */
-pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 			  unsigned long address, spinlock_t **ptlp, int sync)
 {
 	pgd_t *pgd;
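
page_check_address() gets the same treatment: it returns with *ptlp locked only on success, so the conditional acquire again belongs in a header wrapper. A sketch of what that wrapper could look like in include/linux/rmap.h (an assumption, as the header is not part of this diff):

pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
			    unsigned long address,
			    spinlock_t **ptlp, int sync);

static inline pte_t *page_check_address(struct page *page,
					struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	/* *ptlp is held exactly when a non-NULL pte is returned. */
	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}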
@@ -745,7 +747,7 @@ int page_mkclean(struct page *page)
 	if (mapping) {
 		ret = page_mkclean_file(mapping, page);
 		if (page_test_dirty(page)) {
-			page_clear_dirty(page);
+			page_clear_dirty(page, 1);
 			ret = 1;
 		}
 	}
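
page_test_dirty()/page_clear_dirty() only do real work on s390, where the dirty bit lives in the page's storage key rather than in the PTE. The new second argument tells the architecture whether the page is still mapped (1 here); as I understand it, this lets s390 use the faster non-quiescing variant of its set-storage-key instruction for pages that are no longer mapped. On every other architecture the calls compile away, roughly as in asm-generic/pgtable.h (the signatures match this diff; the exact fallback bodies are an assumption):

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page, mapped)	do { } while (0)
#endif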
@@ -780,10 +782,10 @@ void page_move_anon_rmap(struct page *page,
 }
 
 /**
- * __page_set_anon_rmap - setup new anonymous rmap
- * @page:	the page to add the mapping to
- * @vma:	the vm area in which the mapping is added
- * @address:	the user virtual address mapped
+ * __page_set_anon_rmap - set up new anonymous rmap
+ * @page:	Page to add to rmap
+ * @vma:	VM area to add page to.
+ * @address:	User virtual address of the mapping
  * @exclusive:	the page is exclusively owned by the current process
  */
 static void __page_set_anon_rmap(struct page *page,
@@ -793,25 +795,16 @@ static void __page_set_anon_rmap(struct page *page,
 
 	BUG_ON(!anon_vma);
 
+	if (PageAnon(page))
+		return;
+
 	/*
 	 * If the page isn't exclusively mapped into this vma,
 	 * we must use the _oldest_ possible anon_vma for the
 	 * page mapping!
 	 */
-	if (!exclusive) {
-		if (PageAnon(page))
-			return;
+	if (!exclusive)
 		anon_vma = anon_vma->root;
-	} else {
-		/*
-		 * In this case, swapped-out-but-not-discarded swap-cache
-		 * is remapped. So, no need to update page->mapping here.
-		 * We convice anon_vma poitned by page->mapping is not obsolete
-		 * because vma->anon_vma is necessary to be a family of it.
-		 */
-		if (PageAnon(page))
-			return;
-	}
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
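
For readability, here is the rewritten function reassembled from the plus side of this hunk (the opening lines and the anon_vma initialization sit above the hunk and are filled in from the kerneldoc, so treat them as an approximation):

static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/* The page already carries an anon_vma; leave it alone. */
	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	/* (the function continues past the end of the hunk) */

The net effect: the PageAnon() early return, previously duplicated in both branches, is hoisted ahead of the comment, the empty else branch disappears, and the stale swap-cache comment (with its "convice"/"poitned" typos) goes with it.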
@@ -942,7 +935,7 @@ void page_remove_rmap(struct page *page)
 	 * containing the swap entry, but page not yet written to swap.
 	 */
 	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
-		page_clear_dirty(page);
+		page_clear_dirty(page, 1);
 		set_page_dirty(page);
 	}
 	/*